code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
---|---|---|---|---|---|
"""Base quality score recalibration."""
import os
import shutil
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
class BQSR(Process):
    """A two pass process of BaseRecalibrator and ApplyBQSR from GATK.

    See GATK website for more information on BaseRecalibrator.

    It is possible to modify read group using GATK's AddOrReplaceGroups through Replace read groups in BAM
    (``read_group``) input field.
    """

    slug = "bqsr"
    name = "BaseQualityScoreRecalibrator"
    process_type = "data:alignment:bam:bqsr:"
    version = "2.3.0"
    category = "BAM processing"
    scheduling_class = SchedulingClass.BATCH
    entity = {"type": "sample"}
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
        },
    }
    data_name = '{{ bam|sample_name|default("?") }}'

    class Input:
        """Input fields to perform Base quality score recalibration."""

        bam = DataField("alignment:bam", label="BAM file containing reads")
        reference = DataField("seq:nucleotide", label="Reference genome file")
        known_sites = ListField(
            DataField(
                data_type="variants:vcf",
                description="One or more databases of known polymorphic sites used to exclude regions around known "
                "polymorphisms from analysis.",
            ),
            label="List of known sites of variation",
        )
        intervals = DataField(
            data_type="bed",
            required=False,
            label="One or more genomic intervals over which to operate.",
            description="This field is optional, but it can speed up the process by restricting calculations to "
            "specific genome regions.",
        )
        read_group = StringField(
            label="Replace read groups in BAM",
            # NOTE: typo fixes only ("file. This", "required fields"); the
            # semantics of the field are unchanged.
            description="Replace read groups in a BAM file. This argument enables the user to replace all read groups "
            "in the INPUT file with a single new read group and assign all reads to this read group in "
            "the OUTPUT BAM file. Addition or replacement is performed using Picard's "
            "AddOrReplaceReadGroups tool. Input should take the form of -name=value delimited by a "
            '";", e.g. "-ID=1;-LB=GENIALIS;-PL=ILLUMINA;-PU=BARCODE;-SM=SAMPLENAME1". See tool\'s '
            "documentation for more information on tag names. Note that PL, LB, PU and SM are required "
            "fields. See caveats of rewriting read groups in the documentation.",
            default="",
        )
        validation_stringency = StringField(
            label="Validation stringency",
            description="Validation stringency for all SAM files read by this program. Setting stringency to SILENT "
            "can improve performance when processing a BAM file in which variable-length data (read, "
            "qualities, tags) do not otherwise need to be decoded. Default is STRICT. This setting is "
            "used in BaseRecalibrator and ApplyBQSR processes.",
            choices=[
                ("STRICT", "STRICT"),
                ("LENIENT", "LENIENT"),
                ("SILENT", "SILENT"),
            ],
            default="STRICT",
        )

        class Advanced:
            """Advanced options."""

            use_original_qualities = BooleanField(
                label="Use the base quality scores from the OQ tag",
                description="This flag tells GATK to use the original base qualities "
                "(that were in the data before BQSR/recalibration) which are stored in the OQ tag, if they are "
                "present, rather than use the post-recalibration quality scores. If no OQ tag is present for a "
                "read, the standard qual score will be used.",
                default=False,
            )
            java_gc_threads = IntegerField(
                label="Java ParallelGCThreads",
                default=2,
                description="Sets the number of threads used during parallel phases of the garbage collectors.",
            )
            max_heap_size = IntegerField(
                label="Java maximum heap size (Xmx)",
                default=12,
                description="Set the maximum Java heap size (in GB).",
            )

        advanced = GroupField(Advanced, label="Advanced options")

    class Output:
        """Output fields to BaseQualityScoreRecalibrator."""

        bam = FileField(label="Base quality score recalibrated BAM file")
        bai = FileField(label="Index of base quality score recalibrated BAM file")
        stats = FileField(label="Alignment statistics")
        bigwig = FileField(label="BigWig file", required=False)
        species = StringField(label="Species")
        build = StringField(label="Build")
        # Typo fix: label previously read "Recalibration tabled".
        recal_table = FileField(label="Recalibration table")

    def run(self, inputs, outputs):
        """Run the analysis."""
        # Prepare output file names.
        bam = os.path.basename(inputs.bam.output.bam.path)
        file_name = os.path.splitext(os.path.basename(inputs.bam.output.bam.path))[0]
        bam_rg = f"{file_name}_RG.bam"

        species = inputs.bam.output.species

        # Never request more GC threads than the process has cores available.
        gc_threads = min(
            self.requirements.resources.cores, inputs.advanced.java_gc_threads
        )

        # Parse read_group argument from a string, delimited by a ; and =
        # into a form that will be accepted by AddOrReplaceReadGroups tool.
        # E.g. '-LB=DAB;-PL=Illumina;-PU=barcode;-SM=sample1' should become
        # ['-LB', 'DAB', '-PL', 'Illumina', '-PU', 'barcode', '-SM', 'sample1']
        # prepended by INPUT and OUTPUT.
        if inputs.read_group:
            arrg = [
                "--java-options",
                f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
                "--INPUT",
                f"{inputs.bam.output.bam.path}",
                "--VALIDATION_STRINGENCY",
                f"{inputs.validation_stringency}",
                "--OUTPUT",
                f"{bam_rg}",
            ]

            present_tags = []
            for x in inputs.read_group.split(";"):
                split_tag = x.split("=")
                arrg.extend(split_tag)
                present_tags.append(split_tag[0])

            # Make sure all arguments to read_group are valid.
            all_tags = {
                "-LB",
                "-PL",
                "-PU",
                "-SM",
                "-CN",
                "-DS",
                "-DT",
                "-FO",
                "-ID",
                "-KS",
                "-PG",
                "-PI",
                "-PM",
                "-SO",
            }
            present_tag_set = set(present_tags)
            check_all_tags = present_tag_set.issubset(all_tags)
            if not check_all_tags:
                self.error("One or more read_group argument(s) improperly formatted.")

            # Check that there are no double entries of arguments to read_group.
            if len(present_tag_set) != len(present_tags):
                self.error("You have duplicate tags in read_group argument.")

            # Check that all mandatory arguments to read_group are present.
            mandatory_tags = {"-LB", "-PL", "-PU", "-SM"}
            check_tags = mandatory_tags.issubset(present_tag_set)
            if not check_tags:
                self.error(
                    "Missing mandatory read_group argument(s) (-PL, -LB, -PU and -SM are mandatory)."
                )

            Cmd["gatk"]["AddOrReplaceReadGroups"](arrg)
        else:
            # No read-group replacement requested; work on a copy of the input.
            shutil.copy2(inputs.bam.output.bam.path, bam_rg)

        # Make sure the file is indexed.
        Cmd["samtools"]["index"](bam_rg)

        # First pass: build the recalibration table with BaseRecalibrator.
        recal_table = f"{file_name}_recalibration.table"
        br_inputs = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            "--input",
            f"{bam_rg}",
            "--output",
            f"{recal_table}",
            "--reference",
            f"{inputs.reference.output.fasta.path}",
            "--read-validation-stringency",
            f"{inputs.validation_stringency}",
        ]
        if inputs.intervals:
            br_inputs.extend(["--intervals", f"{inputs.intervals.output.bed.path}"])
        if inputs.advanced.use_original_qualities:
            br_inputs.append("--use-original-qualities")

        # Add known sites to the input parameters of BaseRecalibrator.
        for site in inputs.known_sites:
            br_inputs.extend(["--known-sites", f"{site.output.vcf.path}"])

        # Prepare bqsr recalibration file.
        Cmd["gatk"]["BaseRecalibrator"](br_inputs)
        self.progress(0.5)

        # Second pass: apply base recalibration.
        ab_inputs = [
            "--java-options",
            f"-XX:ParallelGCThreads={gc_threads} -Xmx{inputs.advanced.max_heap_size}g",
            "--input",
            f"{bam_rg}",
            "--output",
            f"{bam}",
            "--reference",
            f"{inputs.reference.output.fasta.path}",
            "--bqsr-recal-file",
            f"{recal_table}",
            "--read-validation-stringency",
            f"{inputs.validation_stringency}",
        ]
        if inputs.advanced.use_original_qualities:
            ab_inputs.append("--use-original-qualities")
        Cmd["gatk"]["ApplyBQSR"](ab_inputs)

        # Collect alignment statistics for the recalibrated BAM.
        stats = f"{bam}_stats.txt"
        (Cmd["samtools"]["flagstat"][f"{bam}"] > stats)()
        self.progress(0.8)

        # BigWig generation is best-effort; bamtobigwig.sh may not produce one.
        btb_inputs = [f"{bam}", f"{species}", f"{self.requirements.resources.cores}"]
        Cmd["bamtobigwig.sh"](btb_inputs)
        bigwig = file_name + ".bw"
        if not os.path.exists(bigwig):
            self.info("BigWig file not calculated.")
        else:
            outputs.bigwig = bigwig
        self.progress(0.9)

        outputs.bam = bam
        outputs.bai = file_name + ".bai"
        outputs.stats = stats
        outputs.species = species
        outputs.build = inputs.bam.output.build
        outputs.recal_table = recal_table
| genialis/resolwe-bio | resolwe_bio/processes/reads_processing/bqsr.py | Python | apache-2.0 | 10,334 |
# encoding: utf-8
from yast import import_module
import_module('UI')
from yast import *
class Heading2Client:
    """Minimal YaST UI demo: a dialog with a heading, a label and an OK button."""

    def main(self):
        # Assemble the widget tree first, then hand it to the UI layer.
        dialog_content = VBox(
            Heading("This Is a Heading."),
            Label("This is a Label."),
            PushButton("&OK"),
        )
        UI.OpenDialog(dialog_content)
        # Block until the user interacts (e.g. presses OK), then tear down.
        UI.UserInput()
        UI.CloseDialog()

Heading2Client().main()
| yast/yast-python-bindings | examples/Heading2.py | Python | gpl-2.0 | 361 |
from unittest import TestCase
from six.moves import range
from .symmath_check import symmath_check
class SymmathCheckTest(TestCase):
    """Tests for symmath_check: numeric comparison and symbolic (MathML) grading."""

    def test_symmath_check_integers(self):
        # Integers from -100 to 99 should grade correct against themselves
        # and incorrect when perturbed (see _symmath_check_numbers).
        number_list = [i for i in range(-100, 100)]
        self._symmath_check_numbers(number_list)

    def test_symmath_check_floats(self):
        number_list = [i + 0.01 for i in range(-100, 100)]
        self._symmath_check_numbers(number_list)

    def test_symmath_check_same_symbols(self):
        expected_str = "x+2*y"
        # MathML rendering of the identical expression "x+2*y".
        dynamath = '''
        <math xmlns="http://www.w3.org/1998/Math/MathML">
          <mstyle displaystyle="true">
            <mrow>
              <mi>x</mi>
              <mo>+</mo>
              <mn>2</mn>
              <mo>*</mo>
              <mi>y</mi>
            </mrow>
          </mstyle>
        </math>'''.strip()

        # Expect that the exact same symbolic string is marked correct
        result = symmath_check(expected_str, expected_str, dynamath=[dynamath])
        self.assertTrue('ok' in result and result['ok'])

    def test_symmath_check_equivalent_symbols(self):
        expected_str = "x+2*y"
        input_str = "x+y+y"
        # MathML rendering of the algebraically equivalent "x+y+y".
        dynamath = '''
        <math xmlns="http://www.w3.org/1998/Math/MathML">
          <mstyle displaystyle="true">
            <mrow>
              <mi>x</mi>
              <mo>+</mo>
              <mi>y</mi>
              <mo>+</mo>
              <mi>y</mi>
            </mrow>
          </mstyle>
        </math>'''.strip()

        # Expect that equivalent symbolic strings are marked correct
        result = symmath_check(expected_str, input_str, dynamath=[dynamath])
        self.assertTrue('ok' in result and result['ok'])

    def test_symmath_check_different_symbols(self):
        expected_str = "0"
        input_str = "x+y"
        dynamath = '''
        <math xmlns="http://www.w3.org/1998/Math/MathML">
          <mstyle displaystyle="true">
            <mrow>
              <mi>x</mi>
              <mo>+</mo>
              <mi>y</mi>
            </mrow>
          </mstyle>
        </math>'''.strip()

        # Expect that an incorrect response is marked incorrect
        result = symmath_check(expected_str, input_str, dynamath=[dynamath])
        self.assertTrue('ok' in result and not result['ok'])
        # The grader must report a clean "incorrect", not an internal failure.
        self.assertNotIn('fail', result['msg'])

    def _symmath_check_numbers(self, number_list):
        # Shared helper: each number must equal itself and differ from
        # itself shifted by 0.1.
        for n in number_list:
            # expect = ans, so should say the answer is correct
            expect = n
            ans = n
            result = symmath_check(str(expect), str(ans))
            self.assertTrue('ok' in result and result['ok'],
                            "%f should == %f" % (expect, ans))

            # Change expect so that it != ans
            expect += 0.1
            result = symmath_check(str(expect), str(ans))
            self.assertTrue('ok' in result and not result['ok'],
                            "%f should != %f" % (expect, ans))
| msegado/edx-platform | common/lib/symmath/symmath/test_symmath_check.py | Python | agpl-3.0 | 2,677 |
"""
Interface for an output.
"""
from abc import ABCMeta, abstractmethod
from typing import Optional, TextIO
from prompt_toolkit.data_structures import Size
from prompt_toolkit.styles import Attrs
from .color_depth import ColorDepth
__all__ = [
"Output",
"DummyOutput",
]
class Output(metaclass=ABCMeta):
    """
    Base class defining the output interface for a
    :class:`~prompt_toolkit.renderer.Renderer`.

    Actual implementations are
    :class:`~prompt_toolkit.output.vt100.Vt100_Output` and
    :class:`~prompt_toolkit.output.win32.Win32Output`.
    """

    # Underlying text stream, when the implementation wraps one.
    stdout: Optional[TextIO] = None

    @abstractmethod
    def fileno(self) -> int:
        " Return the file descriptor to which we can write for the output. "

    @abstractmethod
    def encoding(self) -> str:
        """
        Return the encoding for this output, e.g. 'utf-8'.
        (This is used mainly to know which characters are supported by the
        output the data, so that the UI can provide alternatives, when
        required.)
        """

    @abstractmethod
    def write(self, data: str) -> None:
        " Write text (Terminal escape sequences will be removed/escaped.) "

    @abstractmethod
    def write_raw(self, data: str) -> None:
        " Write text. "

    @abstractmethod
    def set_title(self, title: str) -> None:
        " Set terminal title. "

    @abstractmethod
    def clear_title(self) -> None:
        " Clear title again. (or restore previous title.) "

    @abstractmethod
    def flush(self) -> None:
        " Write to output stream and flush. "

    @abstractmethod
    def erase_screen(self) -> None:
        """
        Erases the screen with the background colour and moves the cursor to
        home.
        """

    @abstractmethod
    def enter_alternate_screen(self) -> None:
        " Go to the alternate screen buffer. (For full screen applications). "

    @abstractmethod
    def quit_alternate_screen(self) -> None:
        " Leave the alternate screen buffer. "

    @abstractmethod
    def enable_mouse_support(self) -> None:
        " Enable mouse. "

    @abstractmethod
    def disable_mouse_support(self) -> None:
        " Disable mouse. "

    @abstractmethod
    def erase_end_of_line(self) -> None:
        """
        Erases from the current cursor position to the end of the current line.
        """

    @abstractmethod
    def erase_down(self) -> None:
        """
        Erases the screen from the current line down to the bottom of the
        screen.
        """

    @abstractmethod
    def reset_attributes(self) -> None:
        " Reset color and styling attributes. "

    @abstractmethod
    def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None:
        " Set new color and styling attributes. "

    @abstractmethod
    def disable_autowrap(self) -> None:
        " Disable auto line wrapping. "

    @abstractmethod
    def enable_autowrap(self) -> None:
        " Enable auto line wrapping. "

    @abstractmethod
    def cursor_goto(self, row: int = 0, column: int = 0) -> None:
        " Move cursor position. "

    @abstractmethod
    def cursor_up(self, amount: int) -> None:
        " Move cursor `amount` place up. "

    @abstractmethod
    def cursor_down(self, amount: int) -> None:
        " Move cursor `amount` place down. "

    @abstractmethod
    def cursor_forward(self, amount: int) -> None:
        " Move cursor `amount` place forward. "

    @abstractmethod
    def cursor_backward(self, amount: int) -> None:
        " Move cursor `amount` place backward. "

    @abstractmethod
    def hide_cursor(self) -> None:
        " Hide cursor. "

    @abstractmethod
    def show_cursor(self) -> None:
        " Show cursor. "

    # The remaining methods are optional hooks with default (no-op or
    # conservative) implementations; terminal-specific subclasses override
    # the ones that apply to them.

    def ask_for_cpr(self) -> None:
        """
        Asks for a cursor position report (CPR).
        (VT100 only.)
        """

    @property
    def responds_to_cpr(self) -> bool:
        """
        `True` if the `Application` can expect to receive a CPR response after
        calling `ask_for_cpr` (this will come back through the corresponding
        `Input`).

        This is used to determine the amount of available rows we have below
        the cursor position. In the first place, we have this so that the drop
        down autocompletion menus are sized according to the available space.

        On Windows, we don't need this, there we have
        `get_rows_below_cursor_position`.
        """
        return False

    @abstractmethod
    def get_size(self) -> Size:
        " Return the size of the output window. "

    def bell(self) -> None:
        " Sound bell. "

    def enable_bracketed_paste(self) -> None:
        " For vt100 only. "

    def disable_bracketed_paste(self) -> None:
        " For vt100 only. "

    def reset_cursor_key_mode(self) -> None:
        """
        For vt100 only.
        Put the terminal in normal cursor mode (instead of application mode).

        See: https://vt100.net/docs/vt100-ug/chapter3.html
        """

    def scroll_buffer_to_prompt(self) -> None:
        " For Win32 only. "

    def get_rows_below_cursor_position(self) -> int:
        " For Windows only. "
        raise NotImplementedError

    @abstractmethod
    def get_default_color_depth(self) -> ColorDepth:
        """
        Get default color depth for this output.

        This value will be used if no color depth was explicitely passed to the
        `Application`.

        .. note::

            If the `$PROMPT_TOOLKIT_COLOR_DEPTH` environment variable has been
            set, then `outputs.defaults.create_output` will pass this value to
            the implementation as the default_color_depth, which is returned
            here. (This is not used when the output corresponds to a
            prompt_toolkit SSH/Telnet session.)
        """
class DummyOutput(Output):
    """
    For testing. An output class that doesn't render anything.

    Every rendering, cursor and mode method is a deliberate no-op; only the
    queries (`encoding`, `get_size`, ...) return fixed values.
    """

    def fileno(self) -> int:
        " There is no sensible default for fileno(). "
        raise NotImplementedError

    def encoding(self) -> str:
        return "utf-8"

    def write(self, data: str) -> None:
        pass

    def write_raw(self, data: str) -> None:
        pass

    def set_title(self, title: str) -> None:
        pass

    def clear_title(self) -> None:
        pass

    def flush(self) -> None:
        pass

    def erase_screen(self) -> None:
        pass

    def enter_alternate_screen(self) -> None:
        pass

    def quit_alternate_screen(self) -> None:
        pass

    def enable_mouse_support(self) -> None:
        pass

    def disable_mouse_support(self) -> None:
        pass

    def erase_end_of_line(self) -> None:
        pass

    def erase_down(self) -> None:
        pass

    def reset_attributes(self) -> None:
        pass

    def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None:
        pass

    def disable_autowrap(self) -> None:
        pass

    def enable_autowrap(self) -> None:
        pass

    def cursor_goto(self, row: int = 0, column: int = 0) -> None:
        pass

    def cursor_up(self, amount: int) -> None:
        pass

    def cursor_down(self, amount: int) -> None:
        pass

    def cursor_forward(self, amount: int) -> None:
        pass

    def cursor_backward(self, amount: int) -> None:
        pass

    def hide_cursor(self) -> None:
        pass

    def show_cursor(self) -> None:
        pass

    def ask_for_cpr(self) -> None:
        pass

    def bell(self) -> None:
        pass

    def enable_bracketed_paste(self) -> None:
        pass

    def disable_bracketed_paste(self) -> None:
        pass

    def scroll_buffer_to_prompt(self) -> None:
        pass

    def get_size(self) -> Size:
        # Report a fixed 80x40 pseudo-terminal.
        return Size(rows=40, columns=80)

    def get_rows_below_cursor_position(self) -> int:
        return 40

    def get_default_color_depth(self) -> ColorDepth:
        # Lowest depth: 1-bit (monochrome).
        return ColorDepth.DEPTH_1_BIT
| jonathanslenders/python-prompt-toolkit | prompt_toolkit/output/base.py | Python | bsd-3-clause | 7,955 |
#!/usr/bin/env python
import errno
import json
import os
import re
import shutil
import sys
from getmoduleversion import get_version
# set at init time
node_prefix = '/usr/local' # PREFIX variable from Makefile
install_path = None # base target directory (DESTDIR + PREFIX from Makefile)
target_defaults = None
variables = None
def abspath(*args):
  """Join the given path components and return the absolute, normalized path."""
  return os.path.abspath(os.path.join(*args))
def load_config():
  """Read config.gypi from the current directory and return it as a dict.

  config.gypi is GYP output in Python-literal style: strip '#' comments and
  turn single quotes into double quotes so it parses as JSON.
  """
  # Use a context manager so the file handle is closed deterministically
  # (the previous version left it open until garbage collection).
  with open('config.gypi') as f:
    s = f.read()
  s = re.sub(r'#.*?\n', '', s) # strip comments
  s = re.sub(r'\'', '"', s) # convert quotes
  return json.loads(s)
def try_unlink(path):
  # Remove `path`; a missing file is not an error, anything else re-raises.
  try:
    os.unlink(path)
  except OSError, e:
    if e.errno != errno.ENOENT: raise
def try_symlink(source_path, link_path):
  # Replace whatever exists at link_path with a fresh symlink to source_path.
  print 'symlinking %s -> %s' % (source_path, link_path)
  try_unlink(link_path)
  os.symlink(source_path, link_path)
def try_mkdir_r(path):
  # mkdir -p: create intermediate directories; tolerate pre-existing ones.
  try:
    os.makedirs(path)
  except OSError, e:
    if e.errno != errno.EEXIST: raise
def try_rmdir_r(path):
  # Remove `path` and then each now-empty parent directory, walking upward,
  # but never above the install prefix. Stops silently at the first
  # non-empty or already-missing directory.
  path = abspath(path)
  while path.startswith(install_path):
    try:
      os.rmdir(path)
    except OSError, e:
      if e.errno == errno.ENOTEMPTY: return
      if e.errno == errno.ENOENT: return
      raise
    path = abspath(path, '..')
def mkpaths(path, dst):
  """Return a (source_path, target_path) pair for an install operation.

  A trailing '/' on `dst` means "install into this directory" (the source
  basename is kept); otherwise `dst` names the target file itself.
  """
  if dst.endswith('/'):
    target = abspath(install_path, dst, os.path.basename(path))
  else:
    target = abspath(install_path, dst)
  return path, target
def try_copy(path, dst):
  # Copy one file into the install tree, creating parent directories first.
  source_path, target_path = mkpaths(path, dst)
  print 'installing %s' % target_path
  try_mkdir_r(os.path.dirname(target_path))
  try_unlink(target_path) # prevent ETXTBSY errors
  return shutil.copy2(source_path, target_path)
def try_remove(path, dst):
  # Delete one installed file and prune any directories left empty.
  source_path, target_path = mkpaths(path, dst)
  print 'removing %s' % target_path
  try_unlink(target_path)
  try_rmdir_r(os.path.dirname(target_path))
# Apply try_copy/try_remove to every path. Note: Python 2's map() is eager,
# so the lambda runs for its side effect on each element.
def install(paths, dst): map(lambda path: try_copy(path, dst), paths)
def uninstall(paths, dst): map(lambda path: try_remove(path, dst), paths)
def npm_files(action):
  """Install or remove the bundled npm, plus the bin/npm and bin/npx links.

  `action` is either the module-level `install` or `uninstall` callable.
  """
  target_path = 'lib/node_modules/npm/'

  # don't install npm if the target path is a symlink, it probably means
  # that a dev version of npm is installed there
  if os.path.islink(abspath(install_path, target_path)): return

  # npm has a *lot* of files and it'd be a pain to maintain a fixed list here
  # so we walk its source directory instead...
  for dirname, subdirs, basenames in os.walk('deps/npm', topdown=True):
    subdirs[:] = filter('test'.__ne__, subdirs) # skip test suites
    paths = [os.path.join(dirname, basename) for basename in basenames]
    # dirname[9:] drops the leading 'deps/npm/' prefix.
    action(paths, target_path + dirname[9:] + '/')

  # create/remove symlink
  link_path = abspath(install_path, 'bin/npm')
  if action == uninstall:
    action([link_path], 'bin/npm')
  elif action == install:
    try_symlink('../lib/node_modules/npm/bin/npm-cli.js', link_path)
  else:
    assert(0) # unhandled action type

  # create/remove symlink
  link_path = abspath(install_path, 'bin/npx')
  if action == uninstall:
    action([link_path], 'bin/npx')
  elif action == install:
    try_symlink('../lib/node_modules/npm/bin/npx-cli.js', link_path)
  else:
    assert(0) # unhandled action type
def subdir_files(path, dest, action):
  """Apply `action` to every '.h' header below `path`, grouped by directory.

  For each directory under `path` (including `path` itself, and including
  directories with no headers), `action(header_list, target_subdir + '/')`
  is invoked, where the target subdir mirrors the source layout under `dest`.
  """
  grouped = {}
  for dirpath, _, filenames in os.walk(path):
    headers = [dirpath + '/' + name for name in filenames if name.endswith('.h')]
    grouped[dest + dirpath.replace(path, '')] = headers
  for target_subdir, headers in grouped.items():
    action(headers, target_subdir + '/')
def files(action):
  """Install or remove everything: the node binary, docs, npm and headers.

  `action` is either `install` or `uninstall`; the binary name depends on
  platform and on whether node was built as a shared library.
  """
  is_windows = sys.platform == 'win32'
  output_file = 'node'
  output_prefix = 'out/Release/'

  if 'false' == variables.get('node_shared'):
    if is_windows:
      output_file += '.exe'
  else:
    # Shared-library build: the artifact is a DLL / lib<name>.<suffix>.
    if is_windows:
      output_file += '.dll'
    else:
      output_file = 'lib' + output_file + '.' + variables.get('shlib_suffix')

  # GYP will output to lib.target except on OS X, this is hardcoded
  # in its source - see the _InstallableTargetInstallPath function.
  if sys.platform != 'darwin':
    output_prefix += 'lib.target/'

  action([output_prefix + output_file], 'bin/' + output_file)

  if 'true' == variables.get('node_use_dtrace'):
    action(['out/Release/node.d'], 'lib/dtrace/node.d')

  # behave similarly for systemtap
  action(['src/node.stp'], 'share/systemtap/tapset/')

  action(['deps/v8/tools/gdbinit'], 'share/doc/node/')
  action(['deps/v8/tools/lldbinit'], 'share/doc/node/')
  action(['deps/v8/tools/lldb_commands.py'], 'share/doc/node/')

  # BSDs keep man pages under man/, everyone else under share/man/.
  if 'freebsd' in sys.platform or 'openbsd' in sys.platform:
    action(['doc/node.1'], 'man/man1/')
  else:
    action(['doc/node.1'], 'share/man/man1/')

  if 'true' == variables.get('node_install_npm'): npm_files(action)

  headers(action)
def headers(action):
  """Install or remove the public headers (node, v8, uv, openssl, zlib).

  Headers for bundled dependencies are only shipped when the corresponding
  dependency is statically linked (node_shared_* == 'false').
  """
  action([
    'common.gypi',
    'config.gypi',
    'src/node.h',
    'src/node_api.h',
    'src/node_api_types.h',
    'src/node_buffer.h',
    'src/node_object_wrap.h',
    'src/node_version.h',
  ], 'include/node/')

  # Add the expfile that is created on AIX
  if sys.platform.startswith('aix'):
    action(['out/Release/node.exp'], 'include/node/')

  subdir_files('deps/v8/include', 'include/node/', action)

  if 'false' == variables.get('node_shared_libuv'):
    subdir_files('deps/uv/include', 'include/node/', action)

  if 'true' == variables.get('node_use_openssl') and \
     'false' == variables.get('node_shared_openssl'):
    subdir_files('deps/openssl/openssl/include/openssl', 'include/node/openssl/', action)
    subdir_files('deps/openssl/config/archs', 'include/node/openssl/archs', action)
    action(['deps/openssl/config/opensslconf.h'], 'include/node/openssl/')

  if 'false' == variables.get('node_shared_zlib'):
    action([
      'deps/zlib/zconf.h',
      'deps/zlib/zlib.h',
    ], 'include/node/')
def run(args):
  """Entry point: parse argv, load config, then dispatch install/uninstall.

  args mirrors sys.argv: args[1] is the command ('install'/'uninstall'),
  args[2] an optional DESTDIR, args[3] an optional PREFIX.
  """
  global node_prefix, install_path, target_defaults, variables

  # chdir to the project's top-level directory
  os.chdir(abspath(os.path.dirname(__file__), '..'))

  conf = load_config()
  variables = conf['variables']
  target_defaults = conf['target_defaults']

  # argv[2] is a custom install prefix for packagers (think DESTDIR)
  # argv[3] is a custom install prefix (think PREFIX)
  # Difference is that dst_dir won't be included in shebang lines etc.
  dst_dir = args[2] if len(args) > 2 else ''

  if len(args) > 3:
    node_prefix = args[3]

  # install_path thus becomes the base target directory.
  install_path = dst_dir + node_prefix + '/'

  cmd = args[1] if len(args) > 1 else 'install'

  # HEADERS_ONLY restricts the operation to the public header files.
  if os.environ.get('HEADERS_ONLY'):
    if cmd == 'install': return headers(install)
    if cmd == 'uninstall': return headers(uninstall)
  else:
    if cmd == 'install': return files(install)
    if cmd == 'uninstall': return files(uninstall)

  raise RuntimeError('Bad command: %s\n' % cmd)

if __name__ == '__main__':
  run(sys.argv[:])
| hoho/dosido | nodejs/tools/install.py | Python | mit | 6,853 |
from __future__ import division
from builtins import range
from distutils.version import LooseVersion
import numpy as np
from scipy import ndimage
from scipy.ndimage import measurements
from skimage.filter import threshold_otsu
try:
import cv2
except ImportError:
cv2_available = False
else:
cv2_available = LooseVersion(cv2.__version__) >= LooseVersion('2.4.8')
import sima.misc
from .segment import (
SegmentationStrategy, ROIFilter, CircularityFilter, PostProcessingStep)
from .normcut import BasicAffinityMatrix, PlaneNormalizedCuts
from .segment import _check_single_plane
from sima.ROI import ROI, ROIList
class CA1PCNucleus(PostProcessingStep):
    """Return ROI structures containing CA1 pyramidal cell somata.

    Parameters
    ----------
    cuts : list of sima.normcut.CutRegion
        The segmented regions identified by normalized cuts.
    circularity_threhold : float
        ROIs with circularity below threshold are discarded. Default: 0.5.
    min_roi_size : int, optional
        ROIs with fewer than min_roi_size pixels are discarded. Default: 20.
    min_cut_size : int, optional
        No ROIs are made from cuts with fewer than min_cut_size pixels.
        Default: 30.
    channel : int, optional
        The index of the channel to be used.
    x_diameter : int, optional
        The estimated x-diameter of the nuclei in pixels
    y_diameter : int, optional
        The estimated y_diameter of the nuclei in pixels

    Returns
    -------
    sima.ROI.ROIList
        ROI structures each corresponding to a CA1 pyramidal cell soma.
    """

    def __init__(self, channel=0, x_diameter=8, y_diameter=None):
        # Assume circular nuclei when only one diameter is given.
        if y_diameter is None:
            y_diameter = x_diameter
        self._channel = channel
        self._x_diameter = x_diameter
        self._y_diameter = y_diameter

    def apply(self, rois, dataset):
        channel = sima.misc.resolve_channels(self._channel,
                                             dataset.channel_names)
        # CLAHE + unsharp-masked time-average of the first plane.
        processed_im = _processed_image_ca1pc(
            dataset, channel, self._x_diameter, self._y_diameter)[0]
        shape = processed_im.shape[:2]
        ROIs = ROIList([])
        for roi in rois:
            # Flatten the ROI's pixel coordinates for 1-D indexing below.
            roi_indices = np.nonzero(roi.mask[0])
            roi_indices = np.ravel_multi_index(roi_indices, shape)

            # pixel values in the cut
            vals = processed_im.flat[roi_indices]

            # indices of those values below the otsu threshold
            # if all values are identical, continue without adding an ROI
            try:
                roi_indices = roi_indices[vals < threshold_otsu(vals)]
            except ValueError:
                continue

            # apply binary opening and closing to the surviving pixels
            # expand the shape by 1 in all directions to correct for edge
            # effects of binary opening/closing
            twoD_indices = [np.unravel_index(x, shape) for x in roi_indices]
            mask = np.zeros([x + 2 for x in shape])
            for indices in twoD_indices:
                mask[indices[0] + 1, indices[1] + 1] = 1
            mask = ndimage.binary_closing(ndimage.binary_opening(mask))
            mask = mask[1:-1, 1:-1]
            roi_indices = np.where(mask.flat)[0]

            # label blobs in each cut
            labeled_array, num_features = measurements.label(mask)
            # Each connected component becomes its own candidate ROI.
            for feat in range(num_features):
                blob_inds = np.where(labeled_array.flat == feat + 1)[0]
                twoD_indices = [np.unravel_index(x, shape) for x in blob_inds]
                mask = np.zeros(shape)
                for x in twoD_indices:
                    mask[x] = 1
                ROIs.append(ROI(mask=mask))

        return ROIs
def _clahe(image, x_tile_size=10, y_tile_size=10, clip_limit=20):
    """Perform contrast limited adaptive histogram equalization (CLAHE)."""
    if not cv2_available:
        raise ImportError('OpenCV >= 2.4.8 required')
    # Tile grid is chosen so each tile is roughly x_tile_size by y_tile_size
    # pixels; CLAHE is applied to the image after conversion to 8-bit.
    transform = cv2.createCLAHE(clipLimit=clip_limit,
                                tileGridSize=(
                                    int(image.shape[1] // float(x_tile_size)),
                                    int(image.shape[0] // float(y_tile_size))))
    return transform.apply(sima.misc.to8bit(image))
def _unsharp_mask(image, mask_weight, image_weight=1.35, sigma_x=10,
                  sigma_y=10):
    """Perform unsharp masking on an image.

    Sharpens by subtracting a Gaussian-blurred copy from the weighted 8-bit
    image: result = image_weight * img - mask_weight * blur(img).
    """
    if not cv2_available:
        raise ImportError('OpenCV >= 2.4.8 required')
    return cv2.addWeighted(
        sima.misc.to8bit(image), image_weight,
        cv2.GaussianBlur(sima.misc.to8bit(image), (0, 0), sigma_x, sigma_y),
        -mask_weight, 0)
def _processed_image_ca1pc(dataset, channel_idx=-1, x_diameter=10,
                           y_diameter=10):
    """Create a processed image for identifying CA1 pyramidal cell ROIs.

    Each plane of the time-averaged image is contrast-equalized with CLAHE
    and then sharpened with an unsharp mask; the result is stacked per plane.
    """
    unsharp_mask_mask_weight = 0.5
    im = dataset.time_averages[..., channel_idx]
    result = []
    for plane_idx in np.arange(dataset.frame_shape[0]):
        result.append(_unsharp_mask(
            _clahe(im[plane_idx], x_diameter, y_diameter),
            unsharp_mask_mask_weight, 1 + unsharp_mask_mask_weight,
            x_diameter, y_diameter))
    return np.array(result)
class AffinityMatrixCA1PC(BasicAffinityMatrix):
    """Affinity matrix for normalized cuts, modulated by the processed
    time-averaged image so that pixel pairs separated by bright regions
    receive lower affinity.
    """

    def __init__(self, channel=0, max_dist=None, spatial_decay=None,
                 num_pcs=75, x_diameter=10, y_diameter=10, verbose=False):
        super(AffinityMatrixCA1PC, self).__init__(
            channel, max_dist, spatial_decay, num_pcs, verbose)
        self._params['x_diameter'] = x_diameter
        self._params['y_diameter'] = y_diameter

    def _setup(self, dataset):
        super(AffinityMatrixCA1PC, self)._setup(dataset)
        channel = sima.misc.resolve_channels(self._params['channel'],
                                             dataset.channel_names)
        # First plane of the CLAHE + unsharp-masked time average.
        processed_image = _processed_image_ca1pc(
            dataset, channel, self._params['x_diameter'],
            self._params['y_diameter'])[0]
        time_avg = processed_image
        # Clip intensities to +/- 2 standard deviations to limit the
        # influence of outlier pixels.
        std = np.std(time_avg)
        time_avg = np.minimum(time_avg, 2 * std)
        self._time_avg = np.maximum(time_avg, -2 * std)
        # Intensity range of the (upper-clipped) image, used to normalize
        # the exponential penalty in _weight.
        self._dm = time_avg.max() - time_avg.min()

    def _weight(self, r0, r1):
        # Base affinity from BasicAffinityMatrix, then scale down by the
        # maximum image intensity found along the path between the pixels.
        w = super(AffinityMatrixCA1PC, self)._weight(r0, r1)
        dy = r1[0] - r0[0]
        dx = r1[1] - r0[1]
        m = -np.Inf
        # m tracks the maximum of self._time_avg over pixels on the line
        # segment from r0 to r1.
        for xt in range(dx + 1):
            if dx != 0:
                ya = r0[0] + 0.5 + max(0., xt - 0.5) * float(dy) / float(dx)
                yb = r0[0] + 0.5 + min(xt + 0.5, dx) * float(dy) / float(dx)
            else:
                ya = r0[0]
                yb = r1[0]
            ym = int(min(ya, yb))
            yM = int(max(ya, yb))
            for yt in range(ym, yM + 1):
                m = max(m, self._time_avg[yt, r0[1] + xt])
        return w * np.exp(-3. * m / self._dm)
class PlaneCA1PC(SegmentationStrategy):
"""Segmentation method designed for finding CA1 pyramidal cell somata.
Parameters
----------
channel : int, optional
The channel whose signals will be used in the calculations.
num_pcs : int, optional
The number of principle components to be used in the calculations.
Default: 75.
max_dist : tuple of int, optional
Defaults to (2, 2).
spatial_decay : tuple of int, optional
Defaults to (2, 2).
max_pen : float
Iterative cutting will continue as long as the cut cost is less
than max_pen.
cut_min_size, cut_max_size : int
Regardless of the cut cost, iterative cutting will not be
performed on regions with fewer pixels than min_size and will
always be performed on regions larger than max_size.
circularity_threhold : float
ROIs with circularity below threshold are discarded. Default: 0.5.
min_roi_size : int, optional
ROIs with fewer than min_roi_size pixels are discarded. Default: 20.
min_cut_size : int, optional
No ROIs are made from cuts with fewer than min_cut_size pixels.
Default: 30.
x_diameter : int, optional
The estimated x-diameter of the nuclei in pixels. Default: 8
y_diameter : int, optional
The estimated x-diameter of the nuclei in pixels. Default: 8
Notes
-----
This method begins with the normalized cuts procedure. The affinities
used for normalized cuts also depend on the time-averaged image,
which is used to increase the affinity between pixels within the
same dark patch (putative nucleus).
Following the normalized cut procedure, the algorithm attempts to
identify a nucleus within each cut.
* Otsu thresholding of the time-averaged image after processing with
CLAHE and unsharp mask procedures.
* Binary opening and closing of the resulting ROIs.
* Rejection of ROIs not passing the circularity and size requirements.
See also
--------
sima.segment.normcut
Warning
-------
In version 1.0, this method currently only works on datasets with a
single plane, or in conjunction with
:class:`sima.segment.PlaneWiseSegmentation`.
"""
def __init__(
self, channel=0, num_pcs=75, max_dist=None, spatial_decay=None,
cut_max_pen=0.01, cut_min_size=40, cut_max_size=200, x_diameter=10,
y_diameter=10, circularity_threhold=.5, min_roi_size=20,
min_cut_size=30, verbose=False):
super(PlaneCA1PC, self).__init__()
affinity_method = AffinityMatrixCA1PC(
channel, max_dist, spatial_decay, num_pcs, x_diameter, y_diameter,
verbose)
self._normcut_method = PlaneNormalizedCuts(
affinity_method, cut_max_pen, cut_min_size, cut_max_size)
self._normcut_method.append(
ROIFilter(lambda r: r.size >= min_cut_size))
self._normcut_method.append(
CA1PCNucleus(channel, x_diameter, y_diameter))
self._normcut_method.append(
ROIFilter(lambda r: r.size >= min_roi_size))
self._normcut_method.append(CircularityFilter(circularity_threhold))
    @_check_single_plane
    def _segment(self, dataset):
        """Delegate segmentation to the pipeline built in ``__init__``.

        The ``_check_single_plane`` decorator presumably rejects
        multi-plane datasets (consistent with the class Warning above) --
        see its definition for the exact behavior.
        """
        return self._normcut_method.segment(dataset)
| llerussell/sima | sima/segment/ca1pc.py | Python | gpl-2.0 | 10,288 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gconf
import gtk
import logging
import subprocess
from pango import FontDescription
from xml.sax.saxutils import escape as xml_escape
import guake.notifier
from guake.common import _
from guake.common import pixmapfile
from guake.globals import GCONF_PATH
from guake.globals import GKEY
from guake.globals import KEY
from guake.globals import LKEY
# Legacy GNOME 2 gconf key holding the system monospace font name.
GCONF_MONOSPACE_FONT_PATH = '/desktop/gnome/interface/monospace_font_name'
# GNOME 3 gsettings schema and key for the same monospace-font setting,
# queried through the `gsettings` CLI in GConfHandler.default_font_toggled.
DCONF_MONOSPACE_FONT_PATH = 'org.gnome.desktop.interface'
DCONF_MONOSPACE_FONT_KEY = 'monospace-font-name'
# Module-level logger.
log = logging.getLogger(__name__)
class GConfHandler(object):
    """Handles gconf changes, if any gconf variable is changed, a
    different method is called to handle this change.

    Each ``*_changed``/``*_toggled`` method below receives the standard
    gconf notification signature ``(client, connection_id, entry, data)``.
    """

    def __init__(self, guake):
        """Constructor of GConfHandler, just add the guake dir to the
        gconf client and bind the keys to its handler methods.
        """
        self.guake = guake

        client = gconf.client_get_default()
        client.add_dir(GCONF_PATH, gconf.CLIENT_PRELOAD_RECURSIVE)
        notify_add = client.notify_add

        # these keys does not need to be watched.
        # notify_add(KEY('/general/default_shell'), self.shell_changed)
        # notify_add(KEY('/general/use_login_shell'), self.login_shell_toggled)
        # notify_add(KEY('/general/use_popup'), self.popup_toggled)
        # notify_add(KEY('/general/window_losefocus'), self.losefocus_toggled)
        # notify_add(KEY('/general/use_vte_titles'), self.use_vte_titles_changed)
        # notify_add(KEY('/general/quick_open_enable'), self.on_quick_open_enable_changed)
        # notify_add(KEY('/general/quick_open_in_current_terminal'),
        # self.on_quick_open_in_current_terminal_changed)

        # Notification is not required for mouse_display/display_n because
        # set_final_window_rect polls gconf and is called whenever Guake is
        # shown or resized
        notify_add(KEY('/general/show_resizer'), self.show_resizer_toggled)
        notify_add(KEY('/general/use_trayicon'), self.trayicon_toggled)
        notify_add(KEY('/general/window_ontop'), self.ontop_toggled)
        notify_add(KEY('/general/tab_ontop'), self.tab_ontop_toggled)
        notify_add(KEY('/general/window_tabbar'), self.tabbar_toggled)
        notify_add(KEY('/general/window_height'), self.size_changed)
        notify_add(KEY('/general/window_width'), self.size_changed)
        notify_add(KEY('/general/window_height_f'), self.size_changed)
        notify_add(KEY('/general/window_width_f'), self.size_changed)
        notify_add(KEY('/general/window_valignment'), self.alignment_changed)
        notify_add(KEY('/general/window_halignment'), self.alignment_changed)
        notify_add(KEY('/style/cursor_blink_mode'), self.cursor_blink_mode_changed)
        notify_add(KEY('/style/cursor_shape'), self.cursor_shape_changed)
        notify_add(KEY('/general/use_scrollbar'), self.scrollbar_toggled)
        notify_add(KEY('/general/history_size'), self.history_size_changed)
        notify_add(KEY('/general/scroll_output'), self.keystroke_output)
        notify_add(KEY('/general/scroll_keystroke'), self.keystroke_toggled)
        notify_add(KEY('/general/use_default_font'), self.default_font_toggled)
        notify_add(KEY('/general/use_palette_font_and_background_color'),
                   self.palette_font_and_background_color_toggled)
        notify_add(KEY('/style/font/style'), self.fstyle_changed)
        notify_add(KEY('/style/font/color'), self.fcolor_changed)
        notify_add(KEY('/style/font/palette'), self.fpalette_changed)
        # notify_add(KEY('/style/font/palette_name'), self.fpalette_changed)
        notify_add(KEY('/style/font/allow_bold'), self.allow_bold_toggled)
        notify_add(KEY('/style/background/color'), self.bgcolor_changed)
        notify_add(KEY('/style/background/image'), self.bgimage_changed)
        notify_add(KEY('/style/background/transparency'),
                   self.bgtransparency_changed)
        notify_add(KEY('/general/compat_backspace'), self.backspace_changed)
        notify_add(KEY('/general/compat_delete'), self.delete_changed)
        notify_add(KEY('/general/custom_command_file'), self.custom_command_file_changed)
        notify_add(KEY('/general/max_tab_name_length'), self.max_tab_name_length_changed)

    def custom_command_file_changed(self, client, connection_id, entry, data):
        """Reload the custom-commands menu when its file path changes."""
        self.guake.load_custom_commands()

    def show_resizer_toggled(self, client, connection_id, entry, data):
        """If the gconf var show_resizer be changed, this method will
        be called and will show/hide the resizer.
        """
        if entry.value.get_bool():
            self.guake.resizer.show()
        else:
            self.guake.resizer.hide()

    def trayicon_toggled(self, client, connection_id, entry, data):
        """If the gconf var use_trayicon be changed, this method will
        be called and will show/hide the trayicon.
        """
        # AppIndicator-style icons expose set_status; plain GtkStatusIcon
        # exposes set_visible -- support both.
        if hasattr(self.guake.tray_icon, 'set_status'):
            self.guake.tray_icon.set_status(entry.value.get_bool())
        else:
            self.guake.tray_icon.set_visible(entry.value.get_bool())

    def ontop_toggled(self, client, connection_id, entry, data):
        """If the gconf var window_ontop be changed, this method will
        be called and will set the keep_above attribute in guake's
        main window.
        """
        self.guake.window.set_keep_above(entry.value.get_bool())

    def tab_ontop_toggled(self, client, connection_id, entry, data):
        """ tab_ontop changed
        """
        self.guake.set_tab_position()

    def tabbar_toggled(self, client, connection_id, entry, data):
        """If the gconf var use_tabbar be changed, this method will be
        called and will show/hide the tabbar.
        """
        if entry.value.get_bool():
            self.guake.toolbar.show()
        else:
            self.guake.toolbar.hide()

    def alignment_changed(self, client, connection_id, entry, data):
        """If the gconf var window_halignment be changed, this method will
        be called and will call the move function in guake.
        """
        self.guake.set_final_window_rect()

    def size_changed(self, client, connection_id, entry, data):
        """If the gconf var window_height or window_width are changed,
        this method will be called and will call the resize function
        in guake.
        """
        self.guake.set_final_window_rect()

    def cursor_blink_mode_changed(self, client, connection_id, entry, data):
        """Called when cursor blink mode settings has been changed
        """
        for term in self.guake.notebook.iter_terminals():
            term.set_property("cursor-blink-mode", entry.value.get_int())

    def cursor_shape_changed(self, client, connection_id, entry, data):
        """Called when the cursor shape settings has been changed
        """
        for term in self.guake.notebook.iter_terminals():
            term.set_property("cursor-shape", entry.value.get_int())

    def scrollbar_toggled(self, client, connection_id, entry, data):
        """If the gconf var use_scrollbar be changed, this method will
        be called and will show/hide scrollbars of all terminals open.
        """
        for term in self.guake.notebook.iter_terminals():
            # There is an hbox in each tab of the main notebook and it
            # contains a Terminal and a Scrollbar. Since only have the
            # Terminal here, we're going to use this to get the
            # scrollbar and hide/show it.
            hbox = term.get_parent()
            terminal, scrollbar = hbox.get_children()
            if entry.value.get_bool():
                scrollbar.show()
            else:
                scrollbar.hide()

    def history_size_changed(self, client, connection_id, entry, data):
        """If the gconf var history_size be changed, this method will
        be called and will set the scrollback_lines property of all
        terminals open.
        """
        for i in self.guake.notebook.iter_terminals():
            i.set_scrollback_lines(entry.value.get_int())

    def keystroke_output(self, client, connection_id, entry, data):
        """If the gconf var scroll_output be changed, this method will
        be called and will set the scroll_on_output in all terminals
        open.
        """
        for i in self.guake.notebook.iter_terminals():
            i.set_scroll_on_output(entry.value.get_bool())

    def keystroke_toggled(self, client, connection_id, entry, data):
        """If the gconf var scroll_keystroke be changed, this method
        will be called and will set the scroll_on_keystroke in all
        terminals open.
        """
        for i in self.guake.notebook.iter_terminals():
            i.set_scroll_on_keystroke(entry.value.get_bool())

    def default_font_toggled(self, client, connection_id, entry, data):
        """If the gconf var use_default_font be changed, this method
        will be called and will change the font style to the gnome
        default or to the chosen font in style/font/style in all
        terminals open.
        """
        font_name = None
        if entry.value.get_bool():
            # cannot directly use the Gio API since it requires to rework completely
            # the library inclusion, remove dependencies on gobject and so on.
            # Instead, issuing a direct command line request
            proc = subprocess.Popen(['gsettings', 'get', DCONF_MONOSPACE_FONT_PATH,
                                     DCONF_MONOSPACE_FONT_KEY], stdout=subprocess.PIPE)
            # BUGFIX: readline() returns '' (never None) at EOF, so the
            # previous `if font_name is None` fallback was dead code; also
            # strip the trailing newline so pango gets a clean font name.
            font_name = proc.stdout.readline().replace("'", "").strip()
            # BUGFIX: reap the child unconditionally; the old code only
            # kill()-ed it on the fallback path, leaking a zombie whenever
            # gsettings succeeded.
            proc.wait()
            if not font_name:
                # Back to gconf
                font_name = client.get_string(GCONF_MONOSPACE_FONT_PATH)
        else:
            key = KEY('/style/font/style')
            font_name = client.get_string(key)

        if not font_name:
            log.error("Error: unable to find font name !!!")
            return
        font = FontDescription(font_name)
        if not font:
            return
        for i in self.guake.notebook.iter_terminals():
            i.set_font(font)

    def allow_bold_toggled(self, client, connection_id, entry, data):
        """If the gconf var allow_bold is changed, this method will be called
        and will change the VTE terminal o.
        displaying characters in bold font.
        """
        for term in self.guake.notebook.iter_terminals():
            term.set_allow_bold(entry.value.get_bool())

    def palette_font_and_background_color_toggled(self, client, connection_id, entry, data):
        """If the gconf var use_palette_font_and_background_color be changed, this method
        will be called and will change the font color and the background color to the color
        defined in the palette.
        """
        # Intentionally a no-op: fcolor_changed, bgcolor_changed and
        # fpalette_changed read this flag themselves when they fire.
        pass

    def fstyle_changed(self, client, connection_id, entry, data):
        """If the gconf var style/font/style be changed, this method
        will be called and will change the font style in all terminals
        open.
        """
        font = FontDescription(entry.value.get_string())
        for i in self.guake.notebook.iter_terminals():
            i.set_font(font)

    def fcolor_changed(self, client, connection_id, entry, data):
        """If the gconf var style/font/color be changed, this method
        will be called and will change the font color in all terminals
        open.
        """
        fgcolor = gtk.gdk.color_parse(entry.value.get_string())
        use_palette_font_and_background_color = client.get_bool(
            KEY('/general/use_palette_font_and_background_color'))
        if use_palette_font_and_background_color:
            # the palette handler owns the colors in that mode
            return
        for i in self.guake.notebook.iter_terminals():
            # a per-terminal custom color always wins over the global one
            i.set_color_dim(i.custom_fgcolor or fgcolor)
            i.set_color_foreground(i.custom_fgcolor or fgcolor)
            i.set_color_bold(i.custom_fgcolor or fgcolor)

    def fpalette_changed(self, client, connection_id, entry, data):
        """If the gconf var style/font/palette be changed, this method
        will be called and will change the color scheme in all terminals
        open.
        """
        fgcolor = gtk.gdk.color_parse(
            client.get_string(KEY('/style/font/color')))
        bgcolor = gtk.gdk.color_parse(
            client.get_string(KEY('/style/background/color')))
        palette = [gtk.gdk.color_parse(color) for color in
                   entry.value.get_string().split(':')]
        use_palette_font_and_background_color = client.get_bool(
            KEY('/general/use_palette_font_and_background_color'))
        # palette entries 16/17, when present, override fg/bg colors
        if use_palette_font_and_background_color and len(palette) > 16:
            fgcolor = palette[16]
            bgcolor = palette[17]
            for i in self.guake.notebook.iter_terminals():
                i.set_color_dim(fgcolor)
                i.set_color_foreground(fgcolor)
                i.set_color_bold(fgcolor)
                i.set_color_background(bgcolor)
                i.set_background_tint_color(bgcolor)
        for i in self.guake.notebook.iter_terminals():
            # only the first 16 entries form the VTE palette proper
            i.set_colors(fgcolor, bgcolor, palette[:16])

    def bgcolor_changed(self, client, connection_id, entry, data):
        """If the gconf var style/background/color be changed, this
        method will be called and will change the background color in
        all terminals open.
        """
        use_palette_font_and_background_color = client.get_bool(
            KEY('/general/use_palette_font_and_background_color'))
        if use_palette_font_and_background_color:
            log.debug("do not set background from user")
            return
        bgcolor = gtk.gdk.color_parse(entry.value.get_string())
        for i in self.guake.notebook.iter_terminals():
            i.set_color_background(i.custom_bgcolor or bgcolor)
            i.set_background_tint_color(i.custom_bgcolor or bgcolor)

    def bgimage_changed(self, client, connection_id, entry, data):
        """If the gconf var style/background/image be changed, this
        method will be called and will change the background image and
        will set the transparent flag to false if an image is set in
        all terminals open.
        """
        self.guake.set_background_image(entry.value.get_string())

    def bgtransparency_changed(self, client, connection_id, entry, data):
        """If the gconf var style/background/transparency be changed, this
        method will be called and will set the saturation and transparency
        properties in all terminals open.
        """
        self.guake.set_background_transparency(entry.value.get_int())

    def backspace_changed(self, client, connection_id, entry, data):
        """If the gconf var compat_backspace be changed, this method
        will be called and will change the binding configuration in
        all terminals open.
        """
        for i in self.guake.notebook.iter_terminals():
            i.set_backspace_binding(entry.value.get_string())

    def delete_changed(self, client, connection_id, entry, data):
        """If the gconf var compat_delete be changed, this method
        will be called and will change the binding configuration in
        all terminals open.
        """
        for i in self.guake.notebook.iter_terminals():
            i.set_delete_binding(entry.value.get_string())

    def max_tab_name_length_changed(self, client, connection_id, entry, data):
        """If the gconf var max_tab_name_length be changed, this method will
        be called and will set the tab name length limit.
        """
        # avoid get window title before terminal is ready
        if self.guake.notebook.get_current_terminal().get_window_title() is None:
            return
        max_name_length = client.get_int(KEY("/general/max_tab_name_length"))
        if max_name_length == 0:
            # 0 means "unlimited"; a None bound makes the slices below no-ops
            max_name_length = None
        vte_titles_on = client.get_bool(KEY("/general/use_vte_titles"))
        tab_name = self.guake.notebook.get_current_terminal(
        ).get_window_title() if vte_titles_on else _("Terminal")
        for tab in self.guake.tabs.get_children():
            if not getattr(tab, 'custom_label_set', False):
                tab.set_label(tab_name[:max_name_length])
            else:
                # retrieve the custom tab name to restore it
                # NOTE(review): assumes custom_label_text is always a string
                # whenever custom_label_set is True; a missing attribute would
                # make the slice below raise TypeError -- confirm against the
                # tab-creation code.
                tab_custom_name = getattr(tab, 'custom_label_text', False)
                tab.set_label(tab_custom_name[:max_name_length])
class GConfKeyHandler(object):
    """Handles changes in keyboard shortcuts.
    """

    # (gconf key under keybindings/local, name of the guake callback) for
    # every simple one-to-one accelerator; bound in this exact order by
    # load_accelerators().  The callback is looked up by name (lazily, via
    # getattr) only when the gconf key actually parses to an accelerator,
    # matching the behavior of the previous copy-pasted stanzas.
    _ACCELERATORS = (
        ('reset_terminal', 'accel_reset_terminal'),
        ('quit', 'accel_quit'),
        ('new_tab', 'accel_add'),
        ('close_tab', 'close_tab'),
        ('previous_tab', 'accel_prev'),
        ('next_tab', 'accel_next'),
        ('move_tab_left', 'accel_move_tab_left'),
        ('move_tab_right', 'accel_move_tab_right'),
        ('rename_current_tab', 'accel_rename_current_tab'),
        ('clipboard_copy', 'accel_copy_clipboard'),
        ('clipboard_paste', 'accel_paste_clipboard'),
        ('toggle_fullscreen', 'accel_toggle_fullscreen'),
        ('toggle_hide_on_lose_focus', 'accel_toggle_hide_on_lose_focus'),
        ('zoom_in', 'accel_zoom_in'),
        ('zoom_in_alt', 'accel_zoom_in'),
        ('zoom_out', 'accel_zoom_out'),
        ('increase_height', 'accel_increase_height'),
        ('decrease_height', 'accel_decrease_height'),
        ('increase_transparency', 'accel_increase_transparency'),
        ('decrease_transparency', 'accel_decrease_transparency'),
        ('toggle_transparency', 'accel_toggle_transparency'),
    )

    def __init__(self, guake):
        """Constructor of Keyboard, only receives the guake instance
        to be used in internal methods.
        """
        self.guake = guake
        self.accel_group = None  # see reload_accelerators
        self.client = gconf.client_get_default()
        notify_add = self.client.notify_add

        # Setup global keys
        self.globalhotkeys = {}
        globalkeys = ['show_hide']
        for key in globalkeys:
            notify_add(GKEY(key), self.reload_global)
            self.client.notify(GKEY(key))

        # Setup local keys
        keys = ['toggle_fullscreen', 'new_tab', 'close_tab', 'rename_current_tab',
                'previous_tab', 'next_tab', 'clipboard_copy', 'clipboard_paste',
                'quit', 'zoom_in', 'zoom_out', 'increase_height', 'decrease_height',
                'increase_transparency', 'decrease_transparency', 'toggle_transparency',
                "search_on_web", 'move_tab_left', 'move_tab_right',
                'switch_tab1', 'switch_tab2', 'switch_tab3', 'switch_tab4', 'switch_tab5',
                'switch_tab6', 'switch_tab7', 'switch_tab8', 'switch_tab9', 'switch_tab10',
                'reset_terminal']
        for key in keys:
            notify_add(LKEY(key), self.reload_accelerators)
            self.client.notify(LKEY(key))

    def reload_global(self, client, connection_id, entry, data):
        """Unbind all global hotkeys and rebind the show_hide
        method. If more global hotkeys should be added, just connect
        the gconf key to the watch system and add.
        """
        gkey = entry.get_key()
        key = entry.get_value().get_string()
        try:
            self.guake.hotkeys.unbind(self.globalhotkeys[gkey])
        except KeyError:
            # first notification for this key: nothing bound yet
            pass
        self.globalhotkeys[gkey] = key
        if not self.guake.hotkeys.bind(key, self.guake.show_hide):
            # binding failed (e.g. the key is grabbed elsewhere): tell the
            # user which combination could not be registered
            keyval, mask = gtk.accelerator_parse(key)
            label = gtk.accelerator_get_label(keyval, mask)
            filename = pixmapfile('guake-notification.png')
            guake.notifier.show_message(
                _('Guake Terminal'),
                _('A problem happened when binding <b>%s</b> key.\n'
                  'Please use Guake Preferences dialog to choose another '
                  'key') % xml_escape(label), filename)

    def reload_accelerators(self, *args):
        """Reassign an accel_group to guake main window and guake
        context menu and calls the load_accelerators method.
        """
        if self.accel_group:
            self.guake.window.remove_accel_group(self.accel_group)
        self.accel_group = gtk.AccelGroup()
        self.guake.window.add_accel_group(self.accel_group)
        self.guake.context_menu.set_accel_group(self.accel_group)
        self.load_accelerators()

    def load_accelerators(self):
        """Reads all gconf paths under /apps/guake/keybindings/local
        and adds to the main accel_group.
        """
        def gets(key_name):
            """Fetch the accelerator string stored for a local key."""
            return self.client.get_string(LKEY(key_name))

        def bind(accel, handler_name):
            """Connect *accel* to the guake method named *handler_name*
            when the accelerator string parses to an actual key."""
            key, mask = gtk.accelerator_parse(accel)
            if key > 0:
                self.accel_group.connect_group(
                    key, mask, gtk.ACCEL_VISIBLE,
                    getattr(self.guake, handler_name))

        for gconf_name, handler_name in self._ACCELERATORS:
            bind(gets(gconf_name), handler_name)

        # one accelerator per switch_tabN shortcut (tabs are 0-indexed)
        for tab in xrange(1, 11):
            key, mask = gtk.accelerator_parse(gets('switch_tab%d' % tab))
            if key > 0:
                self.accel_group.connect_group(
                    key, mask, gtk.ACCEL_VISIBLE,
                    self.guake.gen_accel_switch_tabN(tab - 1))

        # search_on_web keeps its historical best-effort handling: its gconf
        # key may be absent, in which case gets() returns None and
        # accelerator_parse raises
        try:
            key, mask = gtk.accelerator_parse(gets('search_on_web'))
            if key > 0:
                self.accel_group.connect_group(key, mask, gtk.ACCEL_VISIBLE,
                                               self.guake.search_on_web)
        except Exception:
            log.exception("Exception occured")
| tirkarthi/guake | src/guake/gconfhandler.py | Python | gpl-2.0 | 25,365 |
"""
General tests for all estimators in sklearn.
"""
import os
import warnings
import sys
import traceback
import numpy as np
from scipy import sparse
from nose.tools import assert_raises, assert_equal, assert_true
from numpy.testing import assert_array_equal, \
assert_array_almost_equal
import sklearn
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.base import clone, ClassifierMixin, RegressorMixin, \
TransformerMixin, ClusterMixin
from sklearn.utils import shuffle
from sklearn.preprocessing import Scaler
#from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_iris, load_boston, make_blobs
from sklearn.metrics import zero_one_score, adjusted_rand_score
from sklearn.lda import LDA
from sklearn.svm.base import BaseLibSVM
# import "special" estimators
from sklearn.grid_search import GridSearchCV
from sklearn.decomposition import SparseCoder
from sklearn.pipeline import Pipeline
from sklearn.pls import _PLS, PLSCanonical, PLSRegression, CCA, PLSSVD
from sklearn.ensemble import BaseEnsemble
from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier,\
OutputCodeClassifier
from sklearn.feature_selection import RFE, RFECV, SelectKBest
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.covariance import EllipticEnvelope, EllipticEnvelop
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.preprocessing import LabelBinarizer, LabelEncoder, Binarizer, \
Normalizer
from sklearn.cluster import WardAgglomeration, AffinityPropagation, \
SpectralClustering
# estimators that cannot be sensibly default-constructed or smoke-tested,
# skipped by every test below
dont_test = [Pipeline, GridSearchCV, SparseCoder, EllipticEnvelope,
             EllipticEnvelop, DictVectorizer, LabelBinarizer, LabelEncoder,
             TfidfTransformer]
# estimators that wrap another estimator and therefore need one passed to
# their constructor
meta_estimators = [BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
                   OneVsRestClassifier, RFE, RFECV]
def test_all_estimators():
    """Every estimator must be default-constructible, clonable and must
    have a working ``__repr__``."""
    base_clf = LDA()
    for name, E in all_estimators():
        # some can just not be sensibly default constructed
        if E in dont_test:
            continue
        # silence deprecation warnings while instantiating and probing
        with warnings.catch_warnings(record=True):
            # meta-estimators require a wrapped estimator at construction
            if E in meta_estimators:
                estimator = E(base_clf)
            else:
                estimator = E()
            # cloning and repr must both succeed without raising
            clone(estimator)
            repr(estimator)
def test_estimators_sparse_data():
    """All classifiers and regressors should either deal with sparse data
    or raise an intelligible error message mentioning 'sparse'."""
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    X[X < .8] = 0
    X = sparse.csr_matrix(X)
    y = (4 * rng.rand(40)).astype(np.int)
    estimators = all_estimators()
    estimators = [(name, E) for name, E in estimators
                  if issubclass(E, (ClassifierMixin, RegressorMixin))]
    for name, Clf in estimators:
        if Clf in dont_test or Clf in meta_estimators:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            clf = Clf()
        # fit: must either work or fail with an error that mentions sparsity
        # (modern `except ... as` syntax, consistent with the rest of the file)
        try:
            clf.fit(X, y)
        except TypeError as e:
            if not 'sparse' in repr(e):
                print("Estimator %s doesn't seem to fail gracefully on "
                      "sparse data" % name)
                traceback.print_exc(file=sys.stdout)
                raise e
        except Exception as exc:
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data" % name)
            traceback.print_exc(file=sys.stdout)
            raise exc
def test_transformers():
    """Smoke-test all transformers on a small blob dataset: fit,
    fit_transform and transform must agree and obey shape contracts."""
    estimators = all_estimators()
    transformers = [(name, E) for name, E in estimators
                    if issubclass(E, TransformerMixin)]
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = Scaler().fit_transform(X)
    X -= X.min()
    succeeded = True

    for name, Trans in transformers:
        if Trans in dont_test or Trans in meta_estimators:
            continue
        # these don't actually fit the data:
        if Trans in [AdditiveChi2Sampler, Binarizer, Normalizer]:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            trans = Trans()
        if hasattr(trans, 'compute_importances'):
            trans.compute_importances = True
        if Trans is SelectKBest:
            # SelectKBest has a default of k=10
            # which is more feature than we have.
            trans.k = 1
        # the PLS family needs a two-dimensional target
        if Trans in (_PLS, PLSCanonical, PLSRegression, CCA, PLSSVD):
            y_ = np.vstack([y, 2 * y + np.random.randint(2, size=len(y))])
            y_ = y_.T
        else:
            y_ = y
        try:
            trans.fit(X, y_)
            X_pred = trans.fit_transform(X, y=y_)
            if isinstance(X_pred, tuple):
                for x_pred in X_pred:
                    assert_equal(x_pred.shape[0], n_samples)
            else:
                assert_equal(X_pred.shape[0], n_samples)
        except Exception as e:
            # print() form so this also parses under Python 3
            print(trans)
            print(e)
            print("")
            succeeded = False
            # BUGFIX: skip the transform comparison below for this
            # transformer; without this `continue`, a failed fit left
            # X_pred unset (NameError) or stale from the previous iteration.
            continue
        if hasattr(trans, 'transform'):
            if Trans in (_PLS, PLSCanonical, PLSRegression, CCA, PLSSVD):
                X_pred2 = trans.transform(X, y_)
            else:
                X_pred2 = trans.transform(X)
            if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
                for x_pred, x_pred2 in zip(X_pred, X_pred2):
                    assert_array_almost_equal(
                        x_pred, x_pred2, 2,
                        "fit_transform not correct in %s" % Trans)
            else:
                assert_array_almost_equal(
                    X_pred, X_pred2, 2,
                    "fit_transform not correct in %s" % Trans)
            # raises error on malformed input for transform
            assert_raises(ValueError, trans.transform, X.T)
    assert_true(succeeded)
def test_transformers_sparse_data():
    """All transformers should either deal with sparse data or raise an
    intelligible error message mentioning 'sparse'."""
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    X[X < .8] = 0
    X = sparse.csr_matrix(X)
    y = (4 * rng.rand(40)).astype(np.int)
    estimators = all_estimators()
    estimators = [(name, E) for name, E in estimators
                  if issubclass(E, TransformerMixin)]
    for name, Trans in estimators:
        if Trans in dont_test or Trans in meta_estimators:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            if Trans is Scaler:
                # centering a sparse matrix would densify it
                trans = Trans(with_mean=False)
            else:
                trans = Trans()
        # fit: must either work or fail with an error that mentions sparsity
        # (modern `except ... as` syntax, consistent with the rest of the file)
        try:
            trans.fit(X, y)
        except TypeError as e:
            if not 'sparse' in repr(e):
                print("Estimator %s doesn't seem to fail gracefully on "
                      "sparse data" % name)
                traceback.print_exc(file=sys.stdout)
                raise e
        except Exception as exc:
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data" % name)
            traceback.print_exc(file=sys.stdout)
            raise exc
def test_classifiers_one_label():
    """Classifiers trained on a single label must always return that
    label, or raise a sensible error message."""
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    y = np.ones(10)
    estimators = all_estimators()
    classifiers = [(name, E) for name, E in estimators
                   if issubclass(E, ClassifierMixin)]
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    for name, Clf in classifiers:
        if Clf in dont_test or Clf in meta_estimators:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            clf = Clf()
        # try to fit
        # (modern `except ... as` syntax, consistent with the rest of the file)
        try:
            clf.fit(X_train, y)
        except ValueError as e:
            # a ValueError mentioning 'class' is the accepted graceful failure
            if not 'class' in repr(e):
                print(error_string_fit, Clf, e)
                traceback.print_exc(file=sys.stdout)
                raise e
            else:
                continue
        except Exception as exc:
            print(error_string_fit, Clf, exc)
            traceback.print_exc(file=sys.stdout)
            raise exc
        # predict
        try:
            assert_array_equal(clf.predict(X_test), y)
        except Exception as exc:
            print(error_string_predict, Clf, exc)
            traceback.print_exc(file=sys.stdout)
def test_clustering():
    """Smoke-test clustering algorithms on scaled iris data and check
    that their labels roughly recover the true classes."""
    clustering = [(name, E) for name, E in all_estimators()
                  if issubclass(E, ClusterMixin)]
    iris = load_iris()
    X, y = shuffle(iris.data, iris.target, random_state=7)
    n_samples, n_features = X.shape
    X = Scaler().fit_transform(X)
    for name, Alg in clustering:
        if Alg is WardAgglomeration:
            # this is clustering on the features
            # let's not test that here.
            continue
        # catch deprecation and neighbors warnings
        with warnings.catch_warnings(record=True):
            model = Alg()
            if hasattr(model, "n_clusters"):
                model.set_params(n_clusters=3)
            if hasattr(model, "random_state"):
                model.set_params(random_state=1)
            if Alg is AffinityPropagation:
                model.set_params(preference=-100)
            # fit
            model.fit(X)
        assert_equal(model.labels_.shape, (n_samples,))
        first_labels = model.labels_
        # labels should roughly agree with the ground truth
        assert_greater(adjusted_rand_score(first_labels, y), 0.4)
        # fit another time with ``fit_predict`` and compare results
        if Alg is SpectralClustering:
            # there is no way to make Spectral clustering deterministic :(
            continue
        if hasattr(model, "random_state"):
            model.set_params(random_state=1)
        with warnings.catch_warnings(record=True):
            refit_labels = model.fit_predict(X)
        assert_array_equal(first_labels, refit_labels)
def test_classifiers_train():
    """Classifiers should behave sensibly on their training set and
    respect the shape contracts of predict, decision_function and
    predict_proba, on both a binary and a multiclass problem."""
    estimators = all_estimators()
    classifiers = [(name, E) for name, E in estimators
                   if issubclass(E, ClassifierMixin)]
    iris = load_iris()
    X_m, y_m = iris.data, iris.target
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = Scaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        # do it once with binary, once with multiclass
        n_labels = len(np.unique(y))
        n_samples, n_features = X.shape
        for name, Clf in classifiers:
            if Clf in dont_test or Clf in meta_estimators:
                continue
            if Clf in [MultinomialNB, BernoulliNB]:
                # TODO also test these!
                continue
            # catch deprecation warnings
            with warnings.catch_warnings(record=True):
                clf = Clf()
            # raises error on malformed input for fit
            assert_raises(ValueError, clf.fit, X, y[:-1])
            # fit
            clf.fit(X, y)
            y_pred = clf.predict(X)
            assert_equal(y_pred.shape, (n_samples,))
            # training set performance
            assert_greater(zero_one_score(y, y_pred), 0.78)
            # raises error on malformed input for predict
            assert_raises(ValueError, clf.predict, X.T)
            if hasattr(clf, "decision_function"):
                try:
                    # decision_function agrees with predict:
                    decision = clf.decision_function(X)
                    # BUGFIX: compare ints with ==, not `is` -- identity of
                    # small integers is a CPython implementation accident
                    if n_labels == 2:
                        assert_equal(decision.ravel().shape, (n_samples,))
                        dec_pred = (decision.ravel() > 0).astype(np.int)
                        assert_array_equal(dec_pred, y_pred)
                    if n_labels == 3 and not isinstance(clf, BaseLibSVM):
                        # 1on1 of LibSVM works differently
                        assert_equal(decision.shape, (n_samples, n_labels))
                        assert_array_equal(np.argmax(decision, axis=1), y_pred)
                    # raises error on malformed input for decision_function
                    # (previously asserted twice; once is enough)
                    assert_raises(ValueError, clf.decision_function, X.T)
                except NotImplementedError:
                    pass
            if hasattr(clf, "predict_proba"):
                try:
                    # predict_proba agrees with predict:
                    y_prob = clf.predict_proba(X)
                    assert_equal(y_prob.shape, (n_samples, n_labels))
                    assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
                    # raises error on malformed input for predict_proba
                    # (previously asserted twice; once is enough)
                    assert_raises(ValueError, clf.predict_proba, X.T)
                except NotImplementedError:
                    pass
def test_classifiers_classes():
    """Classifiers must cope with non-consecutive class labels."""
    classifiers = [(name, E) for name, E in all_estimators()
                   if issubclass(E, ClassifierMixin)]
    iris = load_iris()
    X, y = shuffle(iris.data, iris.target, random_state=7)
    X = Scaler().fit_transform(X)
    # map the labels {0, 1, 2} onto the non-consecutive set {1, 3, 5}
    y = 2 * y + 1
    # TODO: make work with next line :)
    #y = y.astype(np.str)
    for name, Clf in classifiers:
        if Clf in dont_test or Clf in meta_estimators:
            continue
        if Clf in [MultinomialNB, BernoulliNB]:
            # TODO also test these!
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Clf()
        # fit
        classifier.fit(X, y)
        predictions = classifier.predict(X)
        # training set performance: every original label must be predicted
        assert_array_equal(np.unique(y), np.unique(predictions))
        assert_greater(zero_one_score(y, predictions), 0.78)
def test_regressors_int():
    # test if regressors can cope with integer labels (by converting them to
    # float)
    estimators = all_estimators()
    regressors = [(name, E) for name, E in estimators if issubclass(E,
        RegressorMixin)]
    boston = load_boston()
    X, y = boston.data, boston.target
    X, y = shuffle(X, y, random_state=0)
    X = Scaler().fit_transform(X)
    # binary integer targets; NOTE: np.random is not seeded here, so the
    # targets differ between runs (both regressors still see the same y)
    y = np.random.randint(2, size=X.shape[0])
    for name, Reg in regressors:
        if Reg in dont_test or Reg in meta_estimators or Reg in (CCA,):
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            # separate estimators to control random seeds
            reg1 = Reg()
            reg2 = Reg()
        if hasattr(reg1, 'alpha'):
            reg1.set_params(alpha=0.01)
            reg2.set_params(alpha=0.01)
        if hasattr(reg1, 'random_state'):
            reg1.set_params(random_state=0)
            reg2.set_params(random_state=0)
        if Reg in (_PLS, PLSCanonical, PLSRegression):
            # PLS-family estimators require a 2d multi-output target
            y_ = np.vstack([y, 2 * y + np.random.randint(2, size=len(y))])
            y_ = y_.T
        else:
            y_ = y
        # fit one estimator on int labels and one on the float conversion;
        # predictions must agree (to 2 decimals)
        reg1.fit(X, y_)
        pred1 = reg1.predict(X)
        reg2.fit(X, y_.astype(np.float))
        pred2 = reg2.predict(X)
        assert_array_almost_equal(pred1, pred2, 2, name)
def test_regressors_train():
    """Smoke-test every regressor: fit, predict and score on scaled boston.

    Failures are collected (printed) rather than aborting the loop, and a
    single flag is asserted at the end so all broken regressors are reported
    in one run.
    """
    estimators = all_estimators()
    regressors = [(name, E) for name, E in estimators if issubclass(E,
        RegressorMixin)]
    boston = load_boston()
    X, y = boston.data, boston.target
    X, y = shuffle(X, y, random_state=0)
    # TODO: test with intercept
    # TODO: test with multiple responses
    X = Scaler().fit_transform(X)
    y = Scaler().fit_transform(y)
    succeeded = True
    for name, Reg in regressors:
        if Reg in dont_test or Reg in meta_estimators:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            reg = Reg()
            if hasattr(reg, 'alpha'):
                reg.set_params(alpha=0.01)
            # raises error on malformed input for fit
            assert_raises(ValueError, reg.fit, X, y[:-1])
            # fit
            try:
                if Reg in (_PLS, PLSCanonical, PLSRegression, CCA):
                    # PLS-family estimators require a 2d multi-output target
                    y_ = np.vstack([y, 2 * y + np.random.randint(2, size=len(y))])
                    y_ = y_.T
                else:
                    y_ = y
                reg.fit(X, y_)
                reg.predict(X)
                if Reg not in (PLSCanonical, CCA):  # TODO: find out why
                    assert_greater(reg.score(X, y_), 0.5)
            except Exception as e:
                # Use function-call print syntax throughout (was `print e` /
                # bare `print`): output is identical under Python 2 and the
                # diagnostics also remain valid syntax under Python 3,
                # consistent with print(reg) below.
                print(reg)
                print(e)
                print('')
                succeeded = False
    assert_true(succeeded)
def test_configure():
    # Smoke test the 'configure' step of setup, this tests all the
    # 'configure' functions in the setup.pys in the scikit
    cwd = os.getcwd()
    setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
    setup_filename = os.path.join(setup_path, 'setup.py')
    # Installed (non-source) trees have no setup.py; nothing to smoke test.
    if not os.path.exists(setup_filename):
        return
    try:
        # Run from the source root with a faked argv, as setup.py expects.
        os.chdir(setup_path)
        old_argv = sys.argv
        sys.argv = ['setup.py', 'config']
        with warnings.catch_warnings():
            # The configuration spits out warnings when not finding
            # Blas/Atlas development headers
            warnings.simplefilter('ignore', UserWarning)
            # NOTE: execfile is Python 2 only; this file predates Python 3.
            execfile('setup.py', dict(__name__='__main__'))
    finally:
        # Always restore the process-global argv and working directory.
        sys.argv = old_argv
        os.chdir(cwd)
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/tests/test_common.py | Python | agpl-3.0 | 18,746 |
# Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Class for handling environment configuration and defaults.'''
import re
import resource
from collections import namedtuple
from ipaddress import ip_address
from lib.coins import Coin
from lib.env_base import EnvBase
import lib.util as lib_util
import server.version as version
# Record describing one advertised server endpoint; nick_suffix distinguishes
# the Tor identity ('_tor') from the clearnet one ('').
NetIdentity = namedtuple('NetIdentity', 'host tcp_port ssl_port nick_suffix')
class Env(EnvBase):
    '''Wraps environment configuration.'''
    # Peer discovery modes; see peer_discovery_enum() for the mapping from
    # the PEER_DISCOVERY environment variable.
    PD_OFF, PD_SELF, PD_ON = range(3)
    def __init__(self):
        super().__init__()
        self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
        # Core database / daemon settings (required).
        self.db_dir = self.required('DB_DIRECTORY')
        self.db_engine = self.default('DB_ENGINE', 'leveldb')
        self.daemon_url = self.required('DAEMON_URL')
        coin_name = self.required('COIN').strip()
        network = self.default('NET', 'mainnet').strip()
        self.coin = Coin.lookup_coin_class(coin_name, network)
        self.cache_MB = self.integer('CACHE_MB', 1200)
        self.host = self.default('HOST', 'localhost')
        self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
        # Server stuff
        self.tcp_port = self.integer('TCP_PORT', None)
        self.ssl_port = self.integer('SSL_PORT', None)
        if self.ssl_port:
            # Serving SSL requires both a certificate and a key file.
            self.ssl_certfile = self.required('SSL_CERTFILE')
            self.ssl_keyfile = self.required('SSL_KEYFILE')
        self.rpc_port = self.integer('RPC_PORT', 8000)
        self.max_subscriptions = self.integer('MAX_SUBSCRIPTIONS', 10000)
        self.banner_file = self.default('BANNER_FILE', None)
        # The Tor banner falls back to the clearnet banner when unset.
        self.tor_banner_file = self.default('TOR_BANNER_FILE',
                                            self.banner_file)
        self.anon_logs = self.boolean('ANON_LOGS', False)
        self.log_sessions = self.integer('LOG_SESSIONS', 3600)
        # Peer discovery
        self.peer_discovery = self.peer_discovery_enum()
        self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
        self.force_proxy = self.boolean('FORCE_PROXY', False)
        self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
        self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
        # The electrum client takes the empty string as unspecified
        self.donation_address = self.default('DONATION_ADDRESS', '')
        # Server limits to help prevent DoS
        self.max_send = self.integer('MAX_SEND', 1000000)
        self.max_subs = self.integer('MAX_SUBS', 250000)
        self.max_sessions = self.sane_max_sessions()
        self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
        self.bandwidth_limit = self.integer('BANDWIDTH_LIMIT', 2000000)
        self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
        # DROP_CLIENT is compiled to a regex matched against client versions.
        self.drop_client = self.custom("DROP_CLIENT", None, re.compile)
        # Identities
        clearnet_identity = self.clearnet_identity()
        tor_identity = self.tor_identity(clearnet_identity)
        self.identities = [identity
                           for identity in (clearnet_identity, tor_identity)
                           if identity is not None]
    def sane_max_sessions(self):
        '''Return the maximum number of sessions to permit.  Normally this
        is MAX_SESSIONS.  However, to prevent open file exhaustion, adjust
        downwards if running with a small open file rlimit.'''
        env_value = self.integer('MAX_SESSIONS', 1000)
        nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        # We give the DB 250 files; allow ElectrumX 100 for itself
        value = max(0, min(env_value, nofile_limit - 350))
        if value < env_value:
            self.log_warning('lowered maximum sessions from {:,d} to {:,d} '
                             'because your open file limit is {:,d}'
                             .format(env_value, value, nofile_limit))
        return value
    def clearnet_identity(self):
        '''Return a NetIdentity for REPORT_HOST, or None when unset.
        Raises Error for hosts that are invalid or unannounceable.'''
        host = self.default('REPORT_HOST', None)
        if host is None:
            return None
        try:
            ip = ip_address(host)
        except ValueError:
            # Not an IP literal: must be a valid, non-local hostname.
            bad = (not lib_util.is_valid_hostname(host)
                   or host.lower() == 'localhost')
        else:
            # IP literal: reject multicast/unspecified, and private
            # addresses when we would announce them to peers.
            bad = (ip.is_multicast or ip.is_unspecified
                   or (ip.is_private and self.peer_announce))
        if bad:
            raise self.Error('"{}" is not a valid REPORT_HOST'.format(host))
        tcp_port = self.integer('REPORT_TCP_PORT', self.tcp_port) or None
        ssl_port = self.integer('REPORT_SSL_PORT', self.ssl_port) or None
        if tcp_port == ssl_port:
            raise self.Error('REPORT_TCP_PORT and REPORT_SSL_PORT '
                             'both resolve to {}'.format(tcp_port))
        return NetIdentity(
            host,
            tcp_port,
            ssl_port,
            ''
        )
    def tor_identity(self, clearnet):
        '''Return a NetIdentity for REPORT_HOST_TOR, or None when unset.
        Ports default to the clearnet identity's, then the listening ones.'''
        host = self.default('REPORT_HOST_TOR', None)
        if host is None:
            return None
        if not host.endswith('.onion'):
            raise self.Error('tor host "{}" must end with ".onion"'
                             .format(host))
        def port(port_kind):
            '''Returns the clearnet identity port, if any and not zero,
            otherwise the listening port.'''
            result = 0
            if clearnet:
                result = getattr(clearnet, port_kind)
            return result or getattr(self, port_kind)
        tcp_port = self.integer('REPORT_TCP_PORT_TOR', port('tcp_port')) or None
        ssl_port = self.integer('REPORT_SSL_PORT_TOR', port('ssl_port')) or None
        if tcp_port == ssl_port:
            raise self.Error('REPORT_TCP_PORT_TOR and REPORT_SSL_PORT_TOR '
                             'both resolve to {}'.format(tcp_port))
        return NetIdentity(
            host,
            tcp_port,
            ssl_port,
            '_tor',
        )
    def server_features(self):
        '''Return the server features dictionary.'''
        hosts = {identity.host: {'tcp_port': identity.tcp_port,
                                 'ssl_port': identity.ssl_port}
                 for identity in self.identities}
        return {
            'hosts': hosts,
            'pruning': None,
            'server_version': version.VERSION,
            'protocol_min': version.PROTOCOL_MIN,
            'protocol_max': version.PROTOCOL_MAX,
            'genesis_hash': self.coin.GENESIS_HASH,
            'hash_function': 'sha256',
        }
    def peer_discovery_enum(self):
        '''Map the PEER_DISCOVERY env var to one of PD_OFF/PD_SELF/PD_ON.
        Anything other than 'off'/''/'self' enables full discovery.'''
        pd = self.default('PEER_DISCOVERY', 'on').strip().lower()
        if pd in ('off', ''):
            return self.PD_OFF
        elif pd == 'self':
            return self.PD_SELF
        else:
            return self.PD_ON
| shsmith/electrumx | server/env.py | Python | mit | 6,927 |
#!/usr/bin/env python
# coding: utf-8
from capytaine.ui.vtk.mesh_viewer import MeshViewer
from capytaine.ui.vtk.body_viewer import FloatingBodyViewer
from capytaine.ui.vtk.animation import Animation
| mancellin/capytaine | capytaine/ui/vtk/__init__.py | Python | gpl-3.0 | 200 |
from common_fixtures import * # NOQA
def test_link_instance_stop_start(super_client, client, context):
    """Link port mappings must survive a stop/start cycle of the instance."""
    target1 = context.create_container(ports=['180', '122/udp'])
    target2 = context.create_container(ports=['280', '222/udp'])
    c = context.create_container(instanceLinks={
        'target1_link': target1.id,
        'target2_link': target2.id})
    assert c.state == 'running'
    # Collect the publicPort:privatePort pairs exposed through the links.
    ports = set()
    for link in c.instanceLinks():
        for port in super_client.reload(link).data.fields.ports:
            ports.add('{}:{}'.format(port.publicPort, port.privatePort))
    assert len(ports) > 0
    new_ports = set()
    c = client.wait_success(c.stop())
    assert c.state == 'stopped'
    for link in super_client.reload(c).instanceLinks():
        assert len(link.data.fields.ports) == 2
        for port in link.data.fields.ports:
            new_ports.add('{}:{}'.format(port.publicPort, port.privatePort))
    # Mappings are unchanged while the instance is stopped...
    assert ports == new_ports
    new_ports = set()
    c = client.wait_success(c.start())
    assert c.state == 'running'
    for link in super_client.reload(c).instanceLinks():
        assert len(link.data.fields.ports) == 2
        for port in link.data.fields.ports:
            new_ports.add('{}:{}'.format(port.publicPort, port.privatePort))
    # ...and unchanged again after it restarts.
    assert ports == new_ports
def _find_agent_instance_ip(nsp, source):
assert source is not None
vnet_id = source.nics()[0].vnetId
assert vnet_id is not None
for agent_instance in nsp.instances():
if agent_instance.nics()[0].vnetId == vnet_id:
assert agent_instance.primaryIpAddress is not None
return agent_instance.primaryIpAddress
assert False, 'Failed to find agent instance for ' + source.id
def test_link_create(client, super_client, context):
    """Creating a container with links activates them and maps all ports."""
    target1 = context.create_container(ports=['180', '122/udp'])
    target2 = context.create_container(ports=['280', '222/udp'])
    c = context.create_container(instanceLinks={
        'target1_link': target1.id,
        'target2_link': target2.id})
    assert c.state == 'running'
    # Links are visible from both ends.
    assert len(c.instanceLinks()) == 2
    assert len(target1.targetInstanceLinks()) == 1
    assert len(target2.targetInstanceLinks()) == 1
    links = c.instanceLinks()
    names = set([x.linkName for x in links])
    assert names == set(['target1_link', 'target2_link'])
    for link in links:
        link = super_client.reload(link)
        assert link.state == 'active'
        assert link.instanceId == c.id
        # All mapped ports must be bound on the agent instance's IP.
        ip_address = _find_agent_instance_ip(context.nsp,
                                             super_client.reload(c))
        if link.linkName == 'target1_link':
            assert link.targetInstanceId == target1.id
            assert len(link.data.fields.ports) == 2
            for port in link.data.fields.ports:
                assert port.ipAddress == ip_address
                assert port.publicPort is not None
                # protocol must match the target's port declaration
                if port.privatePort == 180:
                    assert port.protocol == 'tcp'
                elif port.privatePort == 122:
                    assert port.protocol == 'udp'
                else:
                    assert False
        if link.linkName == 'target2_link':
            assert link.targetInstanceId == target2.id
            assert len(link.data.fields.ports) == 2
            for port in link.data.fields.ports:
                assert port.ipAddress == ip_address
                assert port.publicPort is not None
                if port.privatePort == 280:
                    assert port.protocol == 'tcp'
                elif port.privatePort == 222:
                    assert port.protocol == 'udp'
                else:
                    assert False
def test_link_update(client, context):
    """Re-pointing a link to another target passes through updating-active."""
    target1 = context.create_container()
    target2 = context.create_container()
    c = context.create_container(instanceLinks={
        'target1_link': target1.id,
    })
    link = c.instanceLinks()[0]
    assert link.targetInstanceId == target1.id
    # Switch the link target and persist it.
    link.targetInstanceId = target2.id
    link = client.update(link, link)
    assert link.state == 'updating-active'
    link = client.wait_success(link)
    assert link.targetInstanceId == target2.id
    assert link.state == 'active'
def test_link_remove_restore(client, context):
    """Link state must track the container through its whole lifecycle:
    inactive on create/stop/remove, active while running, removed on purge."""
    target1 = context.create_container()
    c = client.create_container(imageUuid=context.image_uuid,
                                startOnCreate=False,
                                instanceLinks={
                                    'target1_link': target1.id})
    c = client.wait_success(c)
    links = c.instanceLinks()
    assert len(links) == 1
    link = links[0]
    # not started yet, so the link is inactive
    assert link.state == 'inactive'
    c = client.wait_success(c.start())
    link = client.reload(link)
    assert c.state == 'running'
    assert link.state == 'active'
    c = client.wait_success(c.stop())
    link = client.reload(link)
    assert c.state == 'stopped'
    assert link.state == 'inactive'
    c = client.wait_success(client.delete(c))
    link = client.reload(link)
    assert c.state == 'removed'
    assert link.state == 'inactive'
    # restoring a removed container brings it back stopped, link still inactive
    c = client.wait_success(c.restore())
    link = client.reload(link)
    assert c.state == 'stopped'
    assert link.state == 'inactive'
    c = client.wait_success(client.delete(c))
    link = client.reload(link)
    assert c.state == 'removed'
    assert link.state == 'inactive'
    # purge is final: only then does the link itself become removed
    c = client.wait_success(c.purge())
    link = client.reload(link)
    assert c.state == 'purged'
    assert link.state == 'removed'
def test_null_links(context):
    """A link whose target is None is still created and becomes active."""
    container = context.create_container(instanceLinks={'null_link': None})
    links = container.instanceLinks()
    assert len(links) == 1
    link = links[0]
    assert link.state == 'active'
    assert link.linkName == 'null_link'
    assert link.targetInstanceId is None
def test_link_timeout(super_client, client, context):
    """A container must still come up when its link target never starts,
    once the link wait time (here 100 ms) elapses."""
    t = client.create_container(imageUuid=context.image_uuid,
                                startOnCreate=False)
    c = super_client.create_container(accountId=context.project.id,
                                      imageUuid=context.image_uuid,
                                      instanceLinks={'t': t.id},
                                      data={'linkWaitTime': 100})
    c = client.wait_transitioning(c)
    assert c.state == 'running'
def test_link_remove_instance_restart(client, super_client, context):
    """A container whose link was removed while stopped must still restart."""
    target1 = context.create_container()
    c = client.create_container(imageUuid=context.image_uuid,
                                startOnCreate=False,
                                instanceLinks={
                                    'target1_link': target1.id})
    c = client.wait_success(c)
    links = c.instanceLinks()
    assert len(links) == 1
    link = links[0]
    assert link.state == 'inactive'
    c = client.wait_success(c.start())
    link = client.reload(link)
    assert c.state == 'running'
    assert link.state == 'active'
    c = client.wait_success(c.stop())
    assert c.state == 'stopped'
    # remove the link while the container is stopped
    link = client.reload(link)
    link = super_client.wait_success(link.remove())
    assert link.state == 'removed'
    # the container starts fine without it
    c = client.wait_success(c.start())
    assert c.state == 'running'
| jimengliu/cattle | tests/integration/cattletest/core/test_link.py | Python | apache-2.0 | 7,262 |
# Author: Patrick Ohly <[email protected]>
# Copyright: Copyright (C) 2015 Intel Corporation
#
# This file is licensed under the MIT license, see COPYING.MIT in
# this source distribution for the terms.
# A custom scheduler for bitbake which runs rm_work tasks more
# aggressively than the default schedule chosen by rm_work.
# To use it, add to local.conf:
# BB_SCHEDULERS = "rmwork.RunQueueSchedulerRmWork"
# BB_SCHEDULER = "rmwork"
# Then run:
# PYTHONPATH=<path to the directory with rmwork.py> bitbake ...
#
# Optionally, set BB_NUMBER_COMPILE_THREADS to a number <= BB_NUMBER_THREADS
# to limit the number of compile tasks running in parallel. In other words,
# this allows to run more light tasks without overloading the machine with too
# many heavy compile tasks.
import bb
from bb.runqueue import RunQueueSchedulerSpeed as BaseScheduler
import time
class RunQueueSchedulerRmWork(BaseScheduler):
    """
    Similar to RunQueueSchedulerCompletion, but in addition, rm_work tasks
    get a priority higher than anything else and get run even when the maximum
    number of tasks is reached. Together this ensures that nothing blocks running
    the rm_work tasks once they become ready to run.
    """
    name = "rmwork"
    def __init__(self, runqueue, rqdata):
        """Build the priority map: merge per-recipe task orders, reverse them,
        then group tasks of the same kind together (this is Python 2 code:
        iteritems/itervalues/xrange)."""
        BaseScheduler.__init__(self, runqueue, rqdata)
        self.number_compile_tasks = int(self.rq.cfgData.getVar("BB_NUMBER_COMPILE_THREADS", True) or \
                                        self.rq.number_tasks)
        if self.number_compile_tasks > self.rq.number_tasks:
            bb.fatal("BB_NUMBER_COMPILE_THREADS %d must be <= BB_NUMBER_THREADS %d" % \
                     (self.number_compile_tasks, self.rq.number_tasks))
        bb.note('BB_NUMBER_COMPILE_THREADS %d BB_NUMBER_THREADS %d' % \
                (self.number_compile_tasks, self.rq.number_tasks))
        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_rm_work). Both the speed and completion
        # schedule prioritize tasks that must run first before the ones
        # that run later; this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn = self.rqdata.get_task_file(taskid)
            taskname = self.rqdata.get_task_name(taskid)
            task_lists.setdefault(fn, []).append(taskname)
        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceeding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = task_lists.itervalues().next()
        for recipe, new_tasks in task_lists.iteritems():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        # bb.note('Inserting new task %s from %s after %s at %d into %s' %
                        #         (new_task, recipe,
                        #          all_tasks[index - 1] if index > 0 and all_tasks else None,
                        #          index,
                        #          all_tasks))
                        all_tasks.insert(index, new_task)
                        index += 1
        # bb.note('merged task list: %s' % all_tasks)
        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more imporant (= come first). The ordering
        # is now so that do_rm_work[_all] is most important.
        all_tasks.reverse()
        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task after do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        # self.dump_prio('Original priorities from %s' % BaseScheduler)
        for task in all_tasks:
            # Move every task with this name to the front region, keeping
            # the relative recipe order; the inner scan continues past a
            # match so all same-named tasks are collected in one pass.
            for index in xrange(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = self.rqdata.runq_task[taskid]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        # self.dump_prio('rmwork priorities')
    def next(self):
        """Return the next task to run, limiting concurrent do_compile tasks
        to BB_NUMBER_COMPILE_THREADS; returns None when nothing should start."""
        taskid = self.next_buildable_task()
        if taskid is not None:
            if self.rq.stats.active < self.rq.number_tasks:
                # Impose additional constraint on the number of compile tasks.
                # The reason is that each compile task itself is allowed to run
                # multiple processes, and therefore it makes sense to run less
                # of them without also limiting the number of other tasks.
                taskname = self.rqdata.runq_task[taskid]
                if taskname == 'do_compile':
                    active = [x for x in xrange(self.numTasks) if \
                              self.rq.runq_running[x] and not self.rq.runq_complete[x]]
                    active_compile = [x for x in active if self.rqdata.runq_task[x] == 'do_compile']
                    if len(active_compile) >= self.number_compile_tasks:
                        # bb.note('Not starting compile task %s, already have %d running: %s' % \
                        #         (self.describe_task(taskid),
                        #          len(active_compile),
                        #          [self.describe_task(x) for x in active]))
                        # Enabling the debug output above shows that it gets triggered even
                        # when nothing changed. Returning None here seems to trigger some kind of
                        # busy polling. Work around that for now by sleeping.
                        time.sleep(0.1)
                        return None
                return taskid
    def describe_task(self, taskid):
        """Return a human-readable 'ID <n>: <idstring> [pri <p>]' string."""
        result = 'ID %d: %s' % (taskid, self.rqdata.get_user_idstring(taskid))
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result
    def dump_prio(self, comment):
        """Log the whole priority map (debug helper, normally commented out)."""
        bb.note('%s (most important first):\n%s' %
                (comment,
                 '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                            index, taskid in enumerate(self.prio_map)])))
| acornea/meta-intel-iot-security | scripts/rmwork.py | Python | mit | 8,723 |
import time
import pytest
from kervi.application import Application
import kervi.utility.nethelper as nethelper
# Flags flipped by the spine handlers below and inspected by test_application.
APP_READY = False
MODULE_LOADED = None
def app_ready(module_name):
    """Spine 'appReady' event handler; records that the application came up."""
    global APP_READY
    APP_READY = True
def module_loaded(value):
    """Spine 'signalModuleLoad' command handler; records the loaded value."""
    global MODULE_LOADED
    MODULE_LOADED = value
@pytest.mark.slow
def test_application():
    """End-to-end boot of a kervi Application loading one user module."""
    app = Application(
        {
            "modules":["app_module"],
            "network":{
                "ip": "127.0.0.1"
            }
        })
    app.spine.register_event_handler("appReady", app_ready)
    app.spine.register_command_handler("signalModuleLoad", module_loaded)
    # config defaults and overrides applied as expected
    assert app.config.application.id == "kervi"
    assert app.config.modules == ["app_module"]
    assert app.config.network.ip == "127.0.0.1"
    app._xrun()
    process_info = app.spine.send_query("getProcessInfo")
    # give the spawned processes time to start and signal back
    time.sleep(5)
    app.stop(False)
    assert APP_READY
    # "test_x" is presumably sent by app_module on load -- confirm in module
    assert MODULE_LOADED == "test_x"
    # one getProcessInfo entry per expected process
    assert len(process_info) == 4
    processes = ["application", "plugin_kervi.plugin.ipc.websocket", "plugin_kervi.plugin.ui.web", "app_module"]
    for process in process_info:
        assert process["id"] in processes
| kervi/kervi | kervi/tests/test_application.py | Python | mit | 1,175 |
"""
Copyright 2017 Peter Urda
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import viewsets
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from django_discord.models import (
GuildMembership,
User,
)
from ..serializers import (
UserGuildSerializer,
UserSerializer,
)
class UserViewSet(viewsets.ModelViewSet):
    """ViewSet for Discord Users"""
    queryset = User.objects.all().order_by('id')
    serializer_class = UserSerializer
    # DELETE (and PUT) are deliberately excluded: users may only be listed,
    # created, and partially updated through this API.
    http_method_names = [
        'get',
        'post',
        'patch',
        'head',
        'options',
    ]
    @detail_route()
    def guilds(self, request, pk):
        # GET /users/{pk}/guilds/ -- list one user's guild memberships.
        queryset = GuildMembership.objects.filter(user__id=pk).order_by('id')
        serializer = UserGuildSerializer(
            queryset,
            context={'request': request},
            many=True,
        )
        return Response(serializer.data)
| urda/mr.butler | web/api/django_discord/views/user_view_set.py | Python | apache-2.0 | 1,436 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible metadata block consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_templates
short_description: Module to manage virtual machine templates in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage virtual machine templates in oVirt/RHV."
options:
name:
description:
- "Name of the template to manage."
required: true
state:
description:
- "Should the template be present/absent/exported/imported"
choices: ['present', 'absent', 'exported', 'imported']
default: present
vm:
description:
- "Name of the VM, which will be used to create template."
description:
description:
- "Description of the template."
cpu_profile:
description:
- "CPU profile to be set to template."
cluster:
description:
- "Name of the cluster, where template should be created/imported."
exclusive:
description:
- "When C(state) is I(exported) this parameter indicates if the existing templates with the
same name should be overwritten."
export_domain:
description:
- "When C(state) is I(exported) or I(imported) this parameter specifies the name of the
export storage domain."
image_provider:
description:
- "When C(state) is I(imported) this parameter specifies the name of the image provider to be used."
image_disk:
description:
- "When C(state) is I(imported) and C(image_provider) is used this parameter specifies the name of disk
to be imported as template."
storage_domain:
description:
- "When C(state) is I(imported) this parameter specifies the name of the destination data storage domain."
clone_permissions:
description:
- "If I(True) then the permissions of the VM (only the direct ones, not the inherited ones)
will be copied to the created template."
- "This parameter is used only when C(state) I(present)."
default: False
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create template from vm
- ovirt_templates:
cluster: Default
name: mytemplate
vm: rhel7
cpu_profile: Default
description: Test
# Import template
- ovirt_templates:
state: imported
name: mytemplate
export_domain: myexport
storage_domain: mystorage
cluster: mycluster
# Remove template
- ovirt_templates:
state: absent
name: mytemplate
'''
RETURN = '''
id:
description: ID of the template which is managed
returned: On success if template is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
template:
description: "Dictionary of all the template attributes. Template attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
returned: On success if template is found.
type: dict
'''
import time
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_dict_of_struct,
get_link_name,
ovirt_full_argument_spec,
search_by_attributes,
search_by_name,
)
class TemplatesModule(BaseModule):
    """oVirt template-specific hooks plugged into the generic BaseModule
    create/remove/action machinery."""
    def build_entity(self):
        """Build the otypes.Template from module parameters; optional
        sub-objects are only constructed when their parameter is set."""
        return otypes.Template(
            name=self._module.params['name'],
            cluster=otypes.Cluster(
                name=self._module.params['cluster']
            ) if self._module.params['cluster'] else None,
            vm=otypes.Vm(
                name=self._module.params['vm']
            ) if self._module.params['vm'] else None,
            description=self._module.params['description'],
            cpu_profile=otypes.CpuProfile(
                id=search_by_name(
                    self._connection.system_service().cpu_profiles_service(),
                    self._module.params['cpu_profile'],
                ).id
            ) if self._module.params['cpu_profile'] else None,
        )
    def update_check(self, entity):
        """Return True when the existing template already matches the
        requested cluster, description and CPU profile (no update needed)."""
        return (
            equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
            equal(self._module.params.get('description'), entity.description) and
            equal(self._module.params.get('cpu_profile'), get_link_name(self._connection, entity.cpu_profile))
        )
    def _get_export_domain_service(self):
        """Locate the export storage domain (or image provider, for imports)
        named in the parameters and return its service; raises if absent."""
        provider_name = self._module.params['export_domain'] or self._module.params['image_provider']
        export_sds_service = self._connection.system_service().storage_domains_service()
        export_sd = search_by_name(export_sds_service, provider_name)
        if export_sd is None:
            raise ValueError(
                "Export storage domain/Image Provider '%s' wasn't found." % provider_name
            )
        return export_sds_service.service(export_sd.id)
    def post_export_action(self, entity):
        # After an export, subsequent lookups must target the export domain.
        self._service = self._get_export_domain_service().templates_service()
    def post_import_action(self, entity):
        # After an import, look the template up in the engine again.
        self._service = self._connection.system_service().templates_service()
def wait_for_import(module, templates_service):
    """Poll until the imported template appears, or the timeout elapses.

    Returns the template entity, or None when waiting is disabled or the
    deadline passes without the template showing up.
    """
    if not module.params['wait']:
        return None
    deadline = time.time() + module.params['timeout']
    poll_interval = module.params['poll_interval']
    name = module.params['name']
    while time.time() < deadline:
        found = search_by_name(templates_service, name)
        if found:
            return found
        time.sleep(poll_interval)
    return None
def main():
    """Entry point: dispatch on the 'state' parameter to create, remove,
    export or import an oVirt/RHV template, then exit via the module API."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'exported', 'imported'],
            default='present',
        ),
        name=dict(default=None, required=True),
        vm=dict(default=None),
        description=dict(default=None),
        cluster=dict(default=None),
        cpu_profile=dict(default=None),
        disks=dict(default=[], type='list'),
        clone_permissions=dict(type='bool'),
        export_domain=dict(default=None),
        storage_domain=dict(default=None),
        exclusive=dict(type='bool'),
        image_provider=dict(default=None),
        image_disk=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        templates_service = connection.system_service().templates_service()
        templates_module = TemplatesModule(
            connection=connection,
            module=module,
            service=templates_service,
        )
        state = module.params['state']
        if state == 'present':
            # create (or update) the template from a VM
            ret = templates_module.create(
                result_state=otypes.TemplateStatus.OK,
                clone_permissions=module.params['clone_permissions'],
            )
        elif state == 'absent':
            ret = templates_module.remove()
        elif state == 'exported':
            # export to an export storage domain; skipped when a template of
            # the same id already exists there (unless 'exclusive' overwrites)
            template = templates_module.search_entity()
            export_service = templates_module._get_export_domain_service()
            export_template = search_by_attributes(export_service.templates_service(), id=template.id)
            ret = templates_module.action(
                entity=template,
                action='export',
                action_condition=lambda t: export_template is None,
                wait_condition=lambda t: t is not None,
                post_action=templates_module.post_export_action,
                storage_domain=otypes.StorageDomain(id=export_service.get().id),
                exclusive=module.params['exclusive'],
            )
        elif state == 'imported':
            template = templates_module.search_entity()
            if template:
                # already imported; just ensure it is OK
                ret = templates_module.create(
                    result_state=otypes.TemplateStatus.OK,
                )
            else:
                kwargs = {}
                if module.params['image_provider']:
                    # importing a Glance image as a template
                    kwargs.update(
                        disk=otypes.Disk(
                            name=module.params['image_disk']
                        ),
                        template=otypes.Template(
                            name=module.params['name'],
                        ),
                        import_as_template=True,
                    )
                if module.params['image_disk']:
                    # We need to refresh storage domain to get list of images:
                    templates_module._get_export_domain_service().images_service().list()
                    glance_service = connection.system_service().openstack_image_providers_service()
                    image_provider = search_by_name(glance_service, module.params['image_provider'])
                    images_service = glance_service.service(image_provider.id).images_service()
                else:
                    # otherwise import from the export storage domain
                    images_service = templates_module._get_export_domain_service().templates_service()
                template_name = module.params['image_disk'] or module.params['name']
                entity = search_by_name(images_service, template_name)
                if entity is None:
                    raise Exception("Image/template '%s' was not found." % template_name)
                images_service.service(entity.id).import_(
                    storage_domain=otypes.StorageDomain(
                        name=module.params['storage_domain']
                    ) if module.params['storage_domain'] else None,
                    cluster=otypes.Cluster(
                        name=module.params['cluster']
                    ) if module.params['cluster'] else None,
                    **kwargs
                )
                template = wait_for_import(module, templates_service)
                ret = {
                    'changed': True,
                    'id': template.id,
                    'template': get_dict_of_struct(template),
                }
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # NOTE(review): if create_connection() raises, 'connection'/'auth'
        # are unbound here and this line itself raises -- pre-existing issue.
        connection.close(logout=auth.get('token') is None)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == "__main__":
    main()
| RackSec/ansible | lib/ansible/modules/cloud/ovirt/ovirt_templates.py | Python | gpl-3.0 | 11,527 |
#
# Copyright 2016-2018 Universidad Complutense de Madrid
#
# This file is part of PyEmir
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
"""
Spectroscopy mode, Stare Spectra
"""
from numina.core import Result
from numina.array.combine import median
from numina.processing.combine import basic_processing_with_combination
from emirdrp.core.recipe import EmirRecipe
import emirdrp.requirements as reqs
import emirdrp.products as prods
from emirdrp.processing.wavecal.apply_rectwv_coeff import apply_rectwv_coeff
from emirdrp.processing.wavecal.rectwv_coeff_from_mos_library \
import rectwv_coeff_from_mos_library
from emirdrp.processing.wavecal.rectwv_coeff_to_ds9 import save_four_ds9
class SkySpecRecipe(EmirRecipe):
    """Recipe to process data taken in spectral sky mode.
    """

    # Requirements: the raw observation plus the calibrations numina
    # resolves and injects before run() is called.
    obresult = reqs.ObservationResultRequirement()
    master_bpm = reqs.MasterBadPixelMaskRequirement()
    master_bias = reqs.MasterBiasRequirement()
    master_dark = reqs.MasterDarkRequirement()
    master_flat = reqs.MasterSpectralFlatFieldRequirement()
    # Library of rectification / wavelength-calibration coefficients.
    master_rectwv = reqs.MasterRectWaveRequirement()

    # Products delivered by this recipe.
    skyspec = Result(prods.SkySpectrum)
    reduced_image = Result(prods.ProcessedMOS)

    def run(self, rinput):
        """Combine the sky frames and return a rectified,
        wavelength-calibrated sky spectrum plus the combined image.

        The frames are median-combined (with error extensions), then
        rectification/wavelength coefficients for the current CSU
        configuration are derived from the master library and applied.
        """
        self.logger.info('starting spectral sky reduction')

        flow = self.init_filters(rinput)

        # Median-combine the individual exposures.
        reduced_image = basic_processing_with_combination(
            rinput, flow,
            method=median,
            errors=True
        )
        hdr = reduced_image[0].header
        self.set_base_headers(hdr)

        # save intermediate image in work directory
        self.save_intermediate_img(reduced_image, 'reduced_image.fits')

        # RectWaveCoeff object with rectification and wavelength calibration
        # coefficients for the particular CSU configuration
        rectwv_coeff = rectwv_coeff_from_mos_library(
            reduced_image,
            rinput.master_rectwv
        )
        # save as JSON file in work directory
        self.save_structured_as_json(rectwv_coeff, 'rectwv_coeff.json')

        # generate associated ds9 region files and save them in work directory
        if self.intermediate_results:
            save_four_ds9(rectwv_coeff)

        # apply rectification and wavelength calibration
        skyspec = apply_rectwv_coeff(
            reduced_image,
            rectwv_coeff
        )

        self.logger.info('end sky spectral reduction')

        result = self.create_result(
            reduced_image=reduced_image,
            skyspec=skyspec
        )
        return result
| nicocardiel/pyemir | emirdrp/recipes/spec/sky.py | Python | gpl-3.0 | 2,588 |
#!/usr/bin/env python
import binascii
from collections import OrderedDict
import unittest
import argparse
import re
import subprocess
import sys
import os.path
import vecparser
cli_binary = ""
# Cipher modes exercised by this driver. Each vector-file algorithm name
# maps onto a botan CLI --mode value (lowercased, '/' replaced by '-').
SUPPORTED_ALGORITHMS = [
    'AES-128/CFB',
    'AES-192/CFB',
    'AES-256/CFB',
    'AES-128/GCM',
    'AES-192/GCM',
    'AES-256/GCM',
    'AES-128/OCB',
    'AES-128/XTS',
    'AES-256/XTS'
]
def append_ordered(base, additional_elements):
    """Copy every key/value pair of *additional_elements* into *base*,
    preserving the iteration order of the source mapping."""
    for key, value in additional_elements.items():
        base[key] = value
class TestSequence(unittest.TestCase):
    """Empty TestCase; test_* methods are attached dynamically via setattr."""
    pass
def create_test(data):
    """Return a test method that feeds one test vector through the botan
    CLI binary and compares the output with the expected hex string.

    *data* holds the vector fields (Key, Nonce, In, Out, optional AD)
    plus the Algorithm and Direction injected by get_testdata().
    """
    # Vector-file algorithm name -> botan CLI mode string.
    mode_by_algorithm = {
        'AES-128/CFB': 'aes-128-cfb',
        'AES-192/CFB': 'aes-192-cfb',
        'AES-256/CFB': 'aes-256-cfb',
        'AES-128/GCM': 'aes-128-gcm',
        'AES-192/GCM': 'aes-192-gcm',
        'AES-256/GCM': 'aes-256-gcm',
        'AES-128/OCB': 'aes-128-ocb',
        'AES-128/XTS': 'aes-128-xts',
        'AES-256/XTS': 'aes-256-xts',
    }

    def do_test_expected(self):
        iv = data['Nonce']
        key = data['Key']
        ad = data['AD'] if 'AD' in data else ""
        plaintext = data['In'].lower()
        ciphertext = data['Out'].lower()
        algorithm = data['Algorithm']
        direction = data['Direction']

        mode = mode_by_algorithm.get(algorithm)
        if mode is None:
            raise Exception("Unknown algorithm: '" + algorithm + "'")

        cmd = [
            cli_binary,
            "encryption",
            "--mode=%s" % mode,
            "--iv=%s" % iv,
            "--ad=%s" % ad,
            "--key=%s" % key]
        if direction == "decrypt":
            cmd += ['--decrypt']

        # Decryption consumes the ciphertext; encryption the plaintext.
        invalue = ciphertext if direction == "decrypt" else plaintext

        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        out_raw = proc.communicate(input=binascii.unhexlify(invalue))[0]
        actual = binascii.hexlify(out_raw).decode("UTF-8").lower()

        expected = plaintext if direction == "decrypt" else ciphertext
        self.assertEqual(expected, actual)

    return do_test_expected
def get_testdata(document):
    """Flatten a parsed vector document into an OrderedDict that maps a
    sanitized '<algo> no NNN (<direction>)' test name to its vector data
    (original fields plus 'Algorithm' and 'Direction')."""
    out = OrderedDict()
    for algorithm, testcases in document.items():
        if algorithm not in SUPPORTED_ALGORITHMS:
            continue
        for testcase_number, testcase in enumerate(testcases, start=1):
            for direction in ['encrypt', 'decrypt']:
                # Build a unique, identifier-safe test name.
                testname = "{} no {:0>3} ({})".format(
                    algorithm.lower(), testcase_number, direction)
                testname = re.sub("[^-a-z0-9-]", "_", testname)
                testname = re.sub("_+", "_", testname).strip("_")

                entry = dict(testcase)
                entry['Algorithm'] = algorithm
                entry['Direction'] = direction
                out[testname] = entry
    return out
if __name__ == '__main__':
    # The CLI binary under test is the first positional argument; any
    # remaining arguments are forwarded to unittest below.
    parser = argparse.ArgumentParser(description="")
    parser.add_argument('cli_binary',
                        help='path to the botan cli binary')
    parser.add_argument('unittest_args', nargs="*")
    args = parser.parse_args()
    cli_binary = args.cli_binary

    # Parse the vector files shipped with the source tree.
    vecfile_cfb = vecparser.VecDocument(os.path.join('src', 'tests', 'data', 'modes', 'cfb.vec'))
    vecfile_gcm = vecparser.VecDocument(os.path.join('src', 'tests', 'data', 'aead', 'gcm.vec'))
    vecfile_ocb = vecparser.VecDocument(os.path.join('src', 'tests', 'data', 'aead', 'ocb.vec'))
    vecfile_xts = vecparser.VecDocument(os.path.join('src', 'tests', 'data', 'modes', 'xts.vec'))
    #data = vecfile.get_data()
    #for algo in data:
    #    print(algo)
    #    i = 0
    #    for testcase in data[algo]:
    #        i += 1
    #        print(str(i) + ":", testcase)

    # Merge all supported vectors into one ordered test table.
    testdata = OrderedDict()
    append_ordered(testdata, get_testdata(vecfile_cfb.get_data()))
    append_ordered(testdata, get_testdata(vecfile_gcm.get_data()))
    append_ordered(testdata, get_testdata(vecfile_ocb.get_data()))
    append_ordered(testdata, get_testdata(vecfile_xts.get_data()))
    #for testname in testdata:
    #    print(testname)
    #    for key in testdata[testname]:
    #        print("  " + key + ": " + testdata[testname][key])

    # Attach one dynamically created test method per vector/direction.
    for testname in testdata:
        test_method = create_test(testdata[testname])
        test_method.__name__ = 'test_%s' % testname
        setattr(TestSequence, test_method.__name__, test_method)

    # Hand over sys.argv[0] and unittest_args to the testing framework
    sys.argv[1:] = args.unittest_args
    unittest.main()
| Rohde-Schwarz-Cybersecurity/botan | src/scripts/cli_tests.py | Python | bsd-2-clause | 5,200 |
#!/usr/bin/python
import json
import re
import sys
def Feature(name2, id2, **kwargs):
	"""Return a skeleton GeoJSON Feature with an empty MultiPolygon
	geometry; properties hold name2/id2 plus any extra keywords."""
	properties = dict(kwargs)
	properties['name2'] = name2
	properties['id2'] = id2
	return {
		'type': 'Feature',
		'properties': properties,
		'geometry': {'type': 'MultiPolygon', 'coordinates': []},
	}
# Bit flags stored in a feature's 'flags' property:
FeatureFlag_SetBounds = 0x0001 # has partial subunits; set/extend item.bounds for this feature
FeatureFlag_SkipBounds = 0x0002 # don't extend parent/ancestor bounds for this feature

# Shared Feature objects for units whose polygons are split across several
# map points; each matching polygon is appended to the same feature.
NPS_NezPerce_BuffaloEddy = Feature('Buffalo Eddy', 'be')
NPS_NezPerce_Spalding = Feature('Spalding', 'sp', W2='Spalding,_Idaho')
NPS_FortVancouver = Feature('Fort Vancouver', 'fv')
NPS_WA_NetulLanding = Feature('Netul Landing', 'netul')
NPS_WA_SunsetBeach = Feature('Sunset Beach', 'sunset')
NPS_Rainier_Wilkeson = Feature('Wilkeson', 'wilkeson', flags=FeatureFlag_SkipBounds)
NPS_SanJuan_EnglishCamp = Feature('English Camp', 'e')

G = None # active dataset config class (e.g. NPS); set by parseArgs
XY2LL = None # optional x/y -> lng/lat converter; set by parseArgs
SecondPoints = set() # second vertex of each kept ring; used as a unique polygon key
HolesToRemove = () # first vertices of holes to drop in stripHoles
PolygonsToRemove = {
'nps_ca': (
#
# Golden Gate National Recreation Area
#
(-122.519694, 37.533448), # coastal area including Fitzgerald Marine Reserve
(-122.519702, 37.533489), # coastal area including Point Montara Light
(-122.517480, 37.539958), # small area near Montara
(-122.509973, 37.584858), # small area near Devil's Slide
(-122.513501, 37.594477), # Pedro Point Headlands
(-122.499031, 37.600852), # Pacifica State Beach
(-122.425814, 37.874280), # Angel Island State Park
#
# Lassen Volcanic National Park
#
(-121.600877, 40.348154), # Headquarters in Mineral, CA
#
# World War II Valor in the Pacific National Monument
#
(-157.954368, 21.363719), # HI
(-157.954916, 21.363748), # HI
(-157.949648, 21.364684), # HI
(-157.937538, 21.366363), # HI
(-157.936582, 21.367199), # HI
(-157.937516, 21.369237), # HI
),
'nps_hi': (
#
# World War II Valor in the Pacific National Monument
#
(-121.378507, 41.887758), # Tule Lake Unit (CA)
),
'nps_id': (
#
# Nez Perce National Historical Park
#
(-109.206051, 48.373056), # MT
(-117.224839, 45.337427), # OR
(-117.520354, 45.570223), # OR
#
# Minidoka National Historic Site
#
(-122.507464, 47.614825), # Bainbridge Island Japanese American Exclusion Memorial (WA)
),
'nps_mt': (
#
# Nez Perce National Historical Park
#
(-116.267642, 45.803354), # ID
(-115.959301, 46.076778), # ID
(-116.918370, 46.170823), # ID
(-116.935269, 46.173151), # ID
(-116.004092, 46.203655), # ID
(-116.818266, 46.445164), # ID
(-116.818326, 46.446846), # ID
(-116.817829, 46.446914), # ID
(-116.814847, 46.448688), # ID
(-116.810456, 46.449968), # ID
(-116.329538, 46.500551), # ID
(-117.224839, 45.337427), # OR
(-117.520354, 45.570223), # OR
),
'nps_nm': (
#
# Manhattan Project National Historical Park
#
(-84.317198, 35.928011), # Oak Ridge, TN: X-10 Graphite Reactor
(-84.394445, 35.938672), # Oak Ridge, TN: K-25 Building
(-84.256801, 35.984691), # Oak Ridge, TN: Y-12 Building 9204-3
(-84.255495, 35.985871), # Oak Ridge, TN: Y-12 Building 9731
(-119.387098, 46.587399), # Hanford, WA: Hanford High School
(-119.646295, 46.628954), # Hanford, WA: B Reactor
(-119.715131, 46.638641), # Hanford, WA: Bruggemann's Warehouse
(-119.618684, 46.644369), # Hanford, WA: Allard Pump House
(-119.478732, 46.660534), # Hanford, WA: White Bluffs Bank
),
'nps_or': (
#
# Nez Perce National Historical Park
#
(-116.267642, 45.803354), # ID
(-115.959301, 46.076778), # ID
(-116.918370, 46.170823), # ID
(-116.935269, 46.173151), # ID
(-116.004092, 46.203655), # ID
(-116.818266, 46.445164), # ID
(-116.818326, 46.446846), # ID
(-116.817829, 46.446914), # ID
(-116.814847, 46.448688), # ID
(-116.810456, 46.449968), # ID
(-116.329538, 46.500551), # ID
(-109.206051, 48.373056), # MT
),
'nps_tn': (
#
# Manhattan Project National Historical Park
#
(-106.264959, 35.840842), # Los Alamos, NM: Pajarito Site
(-106.345095, 35.843839), # Los Alamos, NM: V-Site
(-106.347393, 35.855718), # Los Alamos, NM: Gun Site
(-119.387098, 46.587399), # Hanford, WA: Hanford High School
(-119.646295, 46.628954), # Hanford, WA: B Reactor
(-119.715131, 46.638641), # Hanford, WA: Bruggemann's Warehouse
(-119.618684, 46.644369), # Hanford, WA: Allard Pump House
(-119.478732, 46.660534), # Hanford, WA: White Bluffs Bank
),
'nps_wa': (
#
# Manhattan Project National Historical Park
#
(-84.317198, 35.928011), # Oak Ridge, TN: X-10 Graphite Reactor
(-84.394445, 35.938672), # Oak Ridge, TN: K-25 Building
(-84.256801, 35.984691), # Oak Ridge, TN: Y-12 Building 9204-3
(-84.255495, 35.985871), # Oak Ridge, TN: Y-12 Building 9731
(-106.264959, 35.840842), # Los Alamos, NM: Pajarito Site
(-106.345095, 35.843839), # Los Alamos, NM: V-Site
(-106.347393, 35.855718), # Los Alamos, NM: Gun Site
#
# Minidoka National Historic Site
#
(-114.239971, 42.678247), # ID
(-114.249704, 42.692788), # ID
#
# Nez Perce National Historical Park
#
(-116.267642, 45.803354), # ID
(-115.959301, 46.076778), # ID
(-116.918370, 46.170823), # ID
(-116.935269, 46.173151), # ID
(-116.004092, 46.203655), # ID
(-116.818266, 46.445164), # ID
(-116.818326, 46.446846), # ID
(-116.817829, 46.446914), # ID
(-116.814847, 46.448688), # ID
(-116.810456, 46.449968), # ID
(-116.329538, 46.500551), # ID
(-109.206051, 48.373056), # MT
(-117.224839, 45.337427), # OR
(-117.520354, 45.570223), # OR
),
}
SeparateFeatures = {
#
# NPS -> AZ -> Montezuma Castle National Monument
#
(-111.825721, 34.613346): Feature('Montezuma Castle', 'castle'),
(-111.749979, 34.645673): Feature('Montezuma Well', 'well', W2='Montezuma_Well', flags=FeatureFlag_SkipBounds),
#
# NPS -> AZ -> Navajo National Monument
#
(-110.817732, 36.670105): Feature('Inscription House', 'ih', flags=FeatureFlag_SkipBounds),
(-110.537306, 36.689120): Feature('Betatakin', 'bt'),
(-110.501645, 36.764945): Feature('Keet Seel', 'ks', flags=FeatureFlag_SkipBounds),
#
# NPS -> AZ -> Saguaro National Park
#
(-110.498832, 32.230070): Feature('Rincon Mountain District', 'rmd'),
(-111.113908, 32.315900): Feature('Tucson Mountain District', 'tmd'),
#
# NPS -> AZ -> Tumacacori National Historical Park
#
(-110.901675, 31.408678): Feature('Los Santos Angeles de Guevavi', 'g',
W2='Mission_Los_Santos_%C3%81ngeles_de_Guevavi', flags=FeatureFlag_SkipBounds),
(-110.959223, 31.454015): Feature('San Cayetano de Calabazas', 'c',
W2='Mission_San_Cayetano_de_Calabazas', flags=FeatureFlag_SkipBounds),
(-111.043785, 31.569337): Feature('San Jose de Tumacacori', 't',
W2='Mission_San_Jos%C3%A9_de_Tumac%C3%A1cori'),
#
# NPS -> CA -> Channel Islands National Park
#
(-119.037070, 33.448103): Feature('Santa Barbara Island', 'sb', W2='Santa_Barbara_Island'),
(-120.270728, 34.001945): Feature('Santa Rosa Island', 'sr', W2='Santa_Rosa_Island_(California)'),
(-119.339176, 34.022787): Feature('Anacapa Island', 'ac', W2='Anacapa_Island'),
(-120.472997, 34.030254): Feature('San Miguel Island', 'sm', W2='San_Miguel_Island'),
(-119.949952, 34.060441): Feature('Santa Cruz Island', 'sc', W2='Santa_Cruz_Island'),
(-119.266819, 34.248069): Feature('Headquarters', 'hq', flags=FeatureFlag_SkipBounds),
#
# NPS -> CA -> Golden Gate National Recreation Area
#
(-122.486926, 37.642721): Feature('Milagra Ridge', 'mr', W2='Milagra_Ridge'),
(-122.509305, 37.776285): Feature('Sutro Heights Park', 'sh', W2='Sutro_Heights_Park'),
(-122.424849, 37.832023): Feature('Alcatraz Island', 'az', W2='Alcatraz_Island'),
#
# NPS -> CA -> Lava Beds National Monument
#
(-121.394089, 41.851072): Feature('Petroglyph Section', 'p', W2='Petroglyph_Point_Archeological_Site'),
#
# NPS -> CA -> World War II Valor in the Pacific National Monument
#
(-121.378507, 41.887758): Feature('Tule Lake Unit', 'tule',
W2='Tule_Lake_Unit,_World_War_II_Valor_in_the_Pacific_National_Monument'),
#
# NPS -> ID -> Nez Perce National Historical Park
#
(-116.267642, 45.803354): Feature('White Bird Battlefield', 'wb', W2='Battle_of_White_Bird_Canyon'),
(-115.959301, 46.076778): Feature('Clearwater Battlefield', 'cw', W2='Battle_of_the_Clearwater'),
(-116.918370, 46.170823): NPS_NezPerce_BuffaloEddy,
(-116.935269, 46.173151): NPS_NezPerce_BuffaloEddy,
(-116.004092, 46.203655): Feature('Heart of the Monster', 'hm'),
(-116.818266, 46.445164): NPS_NezPerce_Spalding,
(-116.818326, 46.446846): NPS_NezPerce_Spalding,
(-116.817829, 46.446914): NPS_NezPerce_Spalding,
(-116.814847, 46.448688): NPS_NezPerce_Spalding,
(-116.810456, 46.449968): NPS_NezPerce_Spalding,
(-116.329538, 46.500551): Feature('Canoe Camp', 'cc'),
#
# NPS -> MT -> Little Bighorn Battlefield National Monument
#
(-107.384146, 45.521082): Feature('Reno-Benteen Battlefield', 'rb'),
(-107.443667, 45.564359): Feature('Custer Battlefield', 'c'),
#
# NPS -> MT -> Nez Perce National Historical Park
#
(-109.206051, 48.373056): Feature('Bear Paw Battlefield', 'bp', W2='Battle_of_Bear_Paw'),
#
# NPS -> NM -> Bandelier National Monument
#
(-106.206648, 35.867916): Feature('Tsankawi Section', 't', W2='Tsankawi'),
#
# NPS -> NM -> Carlsbad Caverns National Park
#
(-104.460401, 32.109626): Feature('Rattlesnake Springs', 'rs', W2='Rattlesnake_Springs_Historic_District'),
#
# NPS -> NM -> Chaco Culture National Historical Park
#
(-108.109815, 35.674474): Feature("Kin Ya'a", 'ky', W2='Kin_Ya%27a', flags=FeatureFlag_SkipBounds),
(-107.681287, 35.972367): Feature('Pueblo Pintado', 'pp', flags=FeatureFlag_SkipBounds),
(-108.145752, 35.979813): Feature('Kin Bineola', 'kb', W2='Kin_Bineola', flags=FeatureFlag_SkipBounds),
#
# NPS -> NM -> El Malpais National Monument
#
(-107.819072, 35.096448): Feature('Northwest New Mexico Visitor Center', 'v', flags=FeatureFlag_SkipBounds),
#
# NPS -> NM -> Manhattan Project National Historical Park -> Los Alamos Unit
#
(-106.264959, 35.840842): Feature('Pajarito Site', 'p'),
(-106.345095, 35.843839): Feature('V-Site', 'v'),
(-106.347393, 35.855718): Feature('Gun Site', 'g'),
#
# NPS -> NM -> Pecos National Historical Park
#
(-105.817663, 35.539247): Feature('Glorieta Unit (Canoncito)', 'gwest', W2='Glorieta_Pass_Battlefield'),
(-105.683200, 35.565951): Feature('Main Unit', 'main'),
(-105.755533, 35.577073): Feature("Glorieta Unit (Pigeon's Ranch)", 'geast', W2='Glorieta_Pass_Battlefield'),
#
# NPS -> NM -> Petroglyph National Monument
#
(-106.749622, 35.153536): Feature('Southern Geologic Window', 'sgw'),
(-106.758586, 35.174355): Feature('Northern Geologic Window', 'ngw'),
(-106.688781, 35.189383): Feature('Piedras Marcadas Canyon', 'pmc'),
#
# NPS -> NM -> Salinas Pueblo Missions National Monument
#
(-106.075920, 34.260079): Feature('Gran Quivira Ruins', 'gq'),
(-106.364623, 34.451208): Feature('Abo Ruins', 'a', W2='Abo_(historic_place)'),
(-106.292308, 34.591781): Feature('Quarai Ruins', 'q', W2='Quarai'),
#
# NPS -> OR -> John Day Fossil Beds National Monument
#
(-119.618141, 44.596444): Feature('Sheep Rock Unit', 'sr'),
(-119.643497, 44.629640): Feature('Sheep Rock Unit (Cathedral Rock)', 'cr'),
(-119.632783, 44.659439): Feature('Sheep Rock Unit (Foree Area)', 'foree'),
(-120.263931, 44.663927): Feature('Painted Hills Unit', 'ph'),
(-120.402547, 44.929289): Feature('Clarno Unit', 'clarno'),
#
# NPS -> OR -> Oregon Caves National Monument and Preserve
#
(-123.644339, 42.160947): Feature('Illinois Valley Visitors Center', 'ivvc', flags=FeatureFlag_SkipBounds),
#
# NPS -> OR -> Nez Perce National Historical Park
#
(-117.224839, 45.337427): Feature('Old Chief Joseph Gravesite', 'cj', W2='Old_Chief_Joseph_Gravesite'),
(-117.520354, 45.570223): Feature('Lostine Homesite', 'lost'),
#
# NPS -> TN -> Manhattan Project National Historical Park -> Oak Ridge Unit
#
(-84.317198, 35.928011): Feature('X-10 Graphite Reactor', 'x10',
W2='X-10_Graphite_Reactor'),
(-84.394445, 35.938672): Feature('K-25 Building', 'k25',
W2='K-25'),
(-84.256801, 35.984691): Feature('Y-12 Building 9204-3', 'y9204',
W2='Clinton_Engineer_Works#Y-12_electromagnetic_separation_plant'),
(-84.255495, 35.985871): Feature('Y-12 Building 9731', 'y9731',
W2='Clinton_Engineer_Works#Y-12_electromagnetic_separation_plant'),
#
# NPS -> UT -> Canyonlands National Park
#
(-110.189552, 38.441484): Feature('Horseshoe Canyon Unit', 'hc', W2='Horseshoe_Canyon_(Utah)'),
#
# NPS -> UT -> Hovenweep National Monument
#
(-109.186458, 37.302006): Feature('Cajon', 'cajon'),
(-109.082068, 37.388997): Feature('Square Tower', 'st'),
(-109.038125, 37.397360): Feature('Holly', 'holly'),
(-109.033020, 37.405043): Feature('Horseshoe and Hackberry', 'hh'),
(-108.722510, 37.413030): Feature('Goodman Point', 'gp'),
(-108.983395, 37.444011): Feature('Cutthroat Castle', 'cc'),
#
# NPS -> WA -> Fort Vancouver National Historic Site
#
(-122.605329, 45.357518): Feature('McLoughlin House', 'mh',
W2='Fort_Vancouver_National_Historic_Site#McLoughlin_House_site'),
(-122.668086, 45.620146): NPS_FortVancouver,
(-122.670418, 45.621595): NPS_FortVancouver,
(-122.666020, 45.624453): NPS_FortVancouver,
#
# NPS -> WA -> Lewis and Clark National Historical Park
#
(-123.932193, 45.984622): Feature('Salt Works', 'salt'),
(-123.929978, 46.093451): NPS_WA_SunsetBeach,
(-123.929925, 46.105998): NPS_WA_SunsetBeach,
(-123.860655, 46.117749): NPS_WA_NetulLanding,
(-123.861380, 46.117812): NPS_WA_NetulLanding,
(-123.870444, 46.138536): Feature('Fort Clatsop', 'clatsop', W2='Fort_Clatsop'),
(-123.897380, 46.243354): Feature('Station Camp', 'station'),
(-123.858657, 46.248833): Feature('Dismal Nitch', 'dn', W2='Dismal_Nitch'),
(-124.074637, 46.302412): Feature('Cape Disappointment', 'cd', W2='Cape_Disappointment_(Washington)'),
#
# NPS -> WA -> Manhattan Project National Historical Park -> Hanford Unit
#
(-119.387098, 46.587399): Feature('Hanford High School', 'hh', W2='Hanford_Site'),
(-119.646295, 46.628954): Feature('B Reactor', 'br', W2='B_Reactor'),
(-119.715131, 46.638641): Feature("Bruggemann's Warehouse", 'bw'),
(-119.618684, 46.644369): Feature('Allard Pump House', 'ap'),
(-119.478732, 46.660534): Feature('White Bluffs Bank', 'wb', W2='White_Bluffs,_Washington'),
#
# NPS -> WA -> Minidoka National Historic Site
#
(-122.507464, 47.614825): Feature('Bainbridge Island|Japanese American|Exclusion Memorial', 'jam',
W2='Bainbridge_Island_Japanese_American_Exclusion_Memorial'),
#
# NPS -> WA -> Mount Rainier National Park
#
(-122.110922, 46.753557): Feature('Elbe', 'elbe', flags=FeatureFlag_SkipBounds),
(-121.952864, 46.992832): Feature('Carbon River Ranger Station', 'cr'),
(-122.045713, 47.103524): NPS_Rainier_Wilkeson,
(-122.046664, 47.103544): NPS_Rainier_Wilkeson,
(-122.046975, 47.103899): NPS_Rainier_Wilkeson,
#
# NPS -> WA -> San Juan Island National Historical Park
#
(-123.004894, 48.469481): Feature('American Camp', 'a'),
(-123.153833, 48.584716): NPS_SanJuan_EnglishCamp,
(-123.133915, 48.594316): NPS_SanJuan_EnglishCamp,
(-123.132979, 48.595408): NPS_SanJuan_EnglishCamp,
}
def log(message, *formatArgs):
	"""Write a formatted diagnostic message to stderr.

	Uses sys.stderr.write() instead of the Python 2-only
	``print >>sys.stderr`` statement, so the helper works unchanged
	under both Python 2 and Python 3.
	"""
	sys.stderr.write(message.format(*formatArgs) + '\n')
def err(*args):
	"""Log a fatal error message (same arguments as log) and abort.

	Exits with status 1 so shells and callers can detect the failure;
	the previous bare sys.exit() exited with status 0 (success) even
	though an error had occurred.
	"""
	log(*args)
	sys.exit(1)
def getMapLink(point, numPoints):
	# Build a pmap.html link for the given (lng, lat) point, tagged with
	# the dataset id and a right-aligned point-count annotation.
	# NOTE(review): relies on the global G being set (by parseArgs) before
	# any logging happens -- confirm call order.
	return 'pmap.html?o={}&ll={:f},{:f} {:>14}'.format(G.id, point[1], point[0],
		'({} points)'.format(numPoints))
def featureKey(feature):
	"""Sort key for features: the feature's display name."""
	properties = feature['properties']
	return properties['name']
def getBounds(points):
	"""Return (minLng, minLat, maxLng, maxLat) over a closed ring.

	The final point is skipped because a GeoJSON ring repeats its first
	vertex at the end; the first vertex seeds the running extremes.
	"""
	minLng, minLat = points[0]
	maxLng, maxLat = points[0]
	for lng, lat in points[1:-1]:
		minLng = min(minLng, lng)
		maxLng = max(maxLng, lng)
		minLat = min(minLat, lat)
		maxLat = max(maxLat, lat)
	return minLng, minLat, maxLng, maxLat
def stripHoles(geojson):
	"""Clean every feature's MultiPolygon in place.

	Drops tiny polygons (< ~0.00025 deg in both axes), polygons listed in
	PolygonsToRemove, and holes listed in HolesToRemove; moves polygons
	listed in SeparateFeatures into their own features. Each polygon is
	keyed by its second vertex, which must be globally unique
	(enforced via the SecondPoints set). Features left with no polygons
	are removed; features that lost polygons to subunits get the
	FeatureFlag_SetBounds flag.
	"""
	strippedFeatures = []
	geojson['features'].sort(key=featureKey)
	for feature in geojson['features']:
		name = feature['properties']['name'] # NOTE(review): unused
		coordinates = feature['geometry']['coordinates']
		numPolygons = len(coordinates)
		log('> "{}" has {} polygon{}', featureKey(feature), numPolygons, '' if numPolygons == 1 else 's')
		polygons = []
		numSmallPolygons = 0
		separateFeatures = []
		for polygon in coordinates:
			numPoints = len(polygon[0]) - 1
			# Every ring (outer + holes) must be closed.
			for subpolygon in polygon:
				assert subpolygon[0] == subpolygon[-1]
			minLng, minLat, maxLng, maxLat = getBounds(polygon[0])
			if maxLng - minLng < 0.00025 and maxLat - minLat < 0.00025:
				log('\tRemoving small polygon with bounds {:f},{:f}, {:f},{:f} ({} points)',
					minLat, minLng, maxLat, maxLng, numPoints)
				numSmallPolygons += 1
				continue
			# The second vertex of the outer ring identifies this polygon.
			secondPoint = tuple(polygon[0][1])
			if secondPoint in SecondPoints:
				err('2nd point {} not unique!', secondPoint)
			SecondPoints.add(secondPoint)
			mapLink = getMapLink(secondPoint, numPoints)
			if secondPoint in PolygonsToRemove:
				log('\t{} REMOVING', mapLink)
				continue
			if secondPoint in SeparateFeatures:
				# Reparent this polygon onto its own (possibly shared) feature.
				f = SeparateFeatures[secondPoint]
				p = f['properties']
				log('\t{} ADDING to "{}"', mapLink, p['name2'])
				p.update(feature['properties'])
				c = f['geometry']['coordinates']
				c.append(polygon)
				if len(c) == 1:
					separateFeatures.append(f)
				secondPoint = None
			else:
				log('\t{}', mapLink)
			if len(polygon) > 1:
				# Filter this polygon's holes.
				holes = []
				for subpolygon in polygon[1:]:
					firstPoint = tuple(subpolygon[0])
					mapLink = getMapLink(firstPoint, len(subpolygon) - 1)
					if firstPoint in HolesToRemove:
						log('\t\tHole at {} REMOVING', mapLink)
					else:
						log('\t\tHole at {}', mapLink)
						holes.append(subpolygon)
				polygon[1:] = holes
			if secondPoint is not None:
				polygons.append(polygon)
		if numSmallPolygons > 0:
			log('\tRemoved {} small polygon{}', numSmallPolygons, '' if numSmallPolygons == 1 else 's')
		coordinates[:] = polygons
		if polygons:
			strippedFeatures.append(feature)
			if separateFeatures:
				# Mark the parent so its bounds get extended per subunit.
				flags = feature['properties'].get('flags', 0) | FeatureFlag_SetBounds
				log('\tSetting flags to {}', flags)
				feature['properties']['flags'] = flags
		strippedFeatures.extend(separateFeatures)
	geojson['features'] = strippedFeatures
@staticmethod
def postprocessNPS(geojson):
	# Postprocess callback for the NPS dataset (installed as NPS.postprocess
	# and invoked via G.postprocess). Deduplicates features by name,
	# promotes Polygon geometries to MultiPolygon, runs stripHoles, and
	# logs the resulting name/id hierarchy.
	# By now every park/preserve pair should have been matched and removed
	# while properties were stripped during json.load.
	assert len(NPS_ParkAndPreserve) == 0
	featureMap = {}
	for feature in geojson['features']:
		properties = feature['properties']
		name = properties['name']
		geometry = feature['geometry']
		if name in featureMap:
			log('Duplicate feature for "{}"', name)
			continue
		featureMap[name] = feature
		if geometry['type'] == 'Polygon':
			# Normalize to MultiPolygon so stripHoles sees one shape type.
			polygon = geometry['coordinates']
			geometry['type'] = 'MultiPolygon'
			geometry['coordinates'] = [polygon]
	stripHoles(geojson)
	# Log the id for each feature and id2 for each named subunit.
	prevName = None
	for feature in geojson['features']:
		p = feature['properties']
		id = p.pop('id')
		id2 = p.pop('id2', None)
		name = p['name']
		name2 = p.get('name2')
		if name != prevName:
			prevName = name
			log('\t\t{} ={}', name, id)
		else:
			# Repeated parent name: must be a subunit row.
			assert name2 is not None
		if name2 is not None:
			assert id2 is not None
			log('\t\t\t{} ={}', name2, id2)
@staticmethod
def mergePolygons(geojson):
	# Postprocess callback (installed as UC_GRANITES.postprocess): merge
	# same-named Polygon features into one MultiPolygon feature apiece,
	# then run stripHoles. Properties of merged features must be identical.
	featureMap = {}
	featureList = []
	for feature in geojson['features']:
		properties = feature['properties']
		name = properties['name']
		geometry = feature['geometry']
		assert geometry['type'] == 'Polygon'
		polygon = geometry['coordinates']
		if name in featureMap:
			# Seen before: append this polygon to the existing feature.
			feature = featureMap[name]
			assert properties == feature['properties']
			geometry = feature['geometry']
			assert geometry['type'] == 'MultiPolygon'
			geometry['coordinates'].append(polygon)
		else:
			# First occurrence: promote to MultiPolygon and keep it.
			featureList.append(feature)
			featureMap[name] = feature
			geometry['type'] = 'MultiPolygon'
			geometry['coordinates'] = [polygon]
	geojson['features'] = featureList
	stripHoles(geojson)
@staticmethod
def stripPolygon(geoType, coordinates):
	# Geometry callback (installed as <dataset>.stripGeometry): round all
	# coordinates to 6 decimal places and, when the global XY2LL converter
	# is set, project x/y values to lng/lat first.
	if geoType == 'Polygon':
		if XY2LL:
			coordinates = [[[round(c, 6) for c in XY2LL(x, y)] for x, y in subpolygon]
				for subpolygon in coordinates]
		else:
			coordinates = [[(round(x, 6), round(y, 6)) for x, y in subpolygon]
				for subpolygon in coordinates]
	elif geoType == 'MultiPolygon':
		if XY2LL:
			coordinates = [[[[round(c, 6) for c in XY2LL(x, y)] for x, y in subpolygon]
				for subpolygon in polygon]
				for polygon in coordinates]
		else:
			coordinates = [[[(round(x, 6), round(y, 6)) for x, y in subpolygon]
				for subpolygon in polygon]
				for polygon in coordinates]
	else:
		sys.exit('Unexpected geometry type: "{}"'.format(geoType))
	return {'type': geoType, 'coordinates': coordinates}
NPS_Wikipedia = {
'alag': 'Alagnak_River',
'ania': 'Aniakchak_National_Monument_and_Preserve',
'crmo': 'Craters_of_the_Moon_National_Monument_and_Preserve',
'dena': 'Denali_National_Park_and_Preserve',
'deto': 'Devils_Tower',
'fobo': 'Fort_Bowie',
'fopo': 'Fort_Point,_San_Francisco',
'gaar': 'Gates_of_the_Arctic_National_Park_and_Preserve',
'glac': 'Glacier_National_Park_(U.S.)',
'glba': 'Glacier_Bay_National_Park_and_Preserve',
'grsa': 'Great_Sand_Dunes_National_Park_and_Preserve',
'hale': 'Haleakal%C4%81_National_Park',
'havo': 'Hawai%CA%BBi_Volcanoes_National_Park',
'hono': 'Honouliuli_Internment_Camp',
'kaho': 'Honok%C5%8Dhau_Settlement_and_Kaloko-Honok%C5%8Dhau_National_Historical_Park',
'kala': 'Kalaupapa_Leprosy_Settlement_and_National_Historical_Park',
'katm': 'Katmai_National_Park_and_Preserve',
'lacl': 'Lake_Clark_National_Park_and_Preserve',
'lewi': 'Lewis_and_Clark_National_and_State_Historical_Parks',
'manz': 'Manzanar',
'puhe': 'Pu%CA%BBukohol%C4%81_Heiau_National_Historic_Site',
'puho': 'Pu%CA%BBuhonua_o_H%C5%8Dnaunau_National_Historical_Park',
'redw': 'Redwood_National_and_State_Parks',
'rori': 'Rosie_the_Riveter/World_War_II_Home_Front_National_Historical_Park',
'sucr': 'Sunset_Crater#Sunset_Crater_Volcano_National_Monument',
'tuma': 'Tumac%C3%A1cori_National_Historical_Park',
'whis': 'Whiskeytown%E2%80%93Shasta%E2%80%93Trinity_National_Recreation_Area',
'wrst': 'Wrangell%E2%80%93St._Elias_National_Park_and_Preserve',
'yuch': 'Yukon%E2%80%93Charley_Rivers_National_Preserve',
}
class ParkAndPreserve(object):
	"""Tracks an NPS unit that is both a park (or monument) and a
	preserve under one unit code; stripPropertiesNPS flips gotPark /
	gotPreserve as each half is encountered and removes the entry from
	NPS_ParkAndPreserve once both have been seen."""
	def __init__(self, state, name, parkType='Park'):
		self.state = state       # two-letter state code
		self.name = name         # base name without the 'National ...' suffix
		self.parkType = parkType # 'Park' or 'Monument'
		self.gotPark = False
		self.gotPreserve = False
NPS_ParkAndPreserve = {
'ania': ParkAndPreserve('AK', 'Aniakchak', 'Monument'),
'dena': ParkAndPreserve('AK', 'Denali'),
'gaar': ParkAndPreserve('AK', 'Gates of the Arctic'),
'glba': ParkAndPreserve('AK', 'Glacier Bay'),
'grsa': ParkAndPreserve('CO', 'Great Sand Dunes'),
'katm': ParkAndPreserve('AK', 'Katmai'),
'lacl': ParkAndPreserve('AK', 'Lake Clark'),
'wrst': ParkAndPreserve('AK', 'Wrangell-St. Elias'),
}
NPS_Names = {
'cech': 'Cesar E. Chavez National Monument',
'jodr': 'John D. Rockefeller Jr.|Memorial Parkway',
'libi': 'Little Bighorn Battlefield|National Monument',
'poch': 'Port Chicago Naval Magazine|National Memorial',
'rori': 'Rosie the Riveter|World War II Home Front|National Historical Park',
'safr': 'San Francisco Maritime|National Historical Park',
'samo': 'Santa Monica Mountains|National Recreation Area',
'valr': 'World War II Valor in the Pacific|National Monument',
'whis': 'Whiskeytown-Shasta-Trinity|National Recreation Area|(Whiskeytown Unit)',
}
NPS_Codes = {
'kica': 'seki',
'sequ': 'seki',
}
NPS_MultiState = {
'mapr': ('NM', 'TN', 'WA'), # Manhattan Project National Historical Park
'miin': ('ID', 'WA'), # Minidoka National Historic Site
'nepe': ('ID', 'MT', 'OR', 'WA'), # Nez Perce National Historical Park
'valr': ('CA', 'HI'), # World War II Valor in the Pacific National Monument
}
NPS_StatePattern = re.compile('^[A-Z]{2}$')
NPS_CodePattern = re.compile('^[A-Z]{4}$')
NPS_ID_Set = set()
def nps_split_name(name):
	"""Split '<base> National <type>' into (base, type) for Park,
	Preserve, and Monument units; abort via err() for anything else."""
	for parkType in ('Park', 'Preserve', 'Monument'):
		suffix = ' National ' + parkType
		if name.endswith(suffix):
			return name[:-len(suffix)], parkType
	err('"{}" is not a national park, preserve, or monument!', name)
@staticmethod
def stripPropertiesNPS(o):
	# Properties callback (installed as NPS.stripProperties): reduce a raw
	# NPS boundary record to {name, code, id[, W]} for the state being
	# generated (global G.state), or return None to drop features that
	# belong to other states. Mutates NPS_ParkAndPreserve and NPS_ID_Set.
	name = o['UNIT_NAME']
	code = o['UNIT_CODE']
	if NPS_CodePattern.match(code) is None:
		err('Code "{}" doesn\'t match pattern!', code)
	state = o['STATE']
	if NPS_StatePattern.match(state) is None:
		err('State "{}" doesn\'t match pattern!', state)
	id = code.lower()
	# Map aliased unit codes (e.g. kica/sequ -> seki) and pick the
	# display name / Wikipedia article overrides.
	code = NPS_Codes.get(id, id)
	displayName = NPS_Names.get(id, name)
	w = NPS_Wikipedia.get(id)
	pp = NPS_ParkAndPreserve.get(id)
	if pp is not None:
		# Park-and-preserve pair sharing one unit code: the preserve
		# half gets a distinct id ('<code>pr'); the pair is removed
		# from the table once both halves have been seen.
		assert pp.state == state
		name, parkType = nps_split_name(name)
		assert pp.name == name
		if parkType == 'Preserve':
			if pp.gotPark:
				del NPS_ParkAndPreserve[id]
			else:
				pp.gotPreserve = True
			id += 'pr'
		else:
			assert pp.parkType == parkType
			if pp.gotPreserve:
				del NPS_ParkAndPreserve[id]
			else:
				pp.gotPark = True
	if id in NPS_ID_Set:
		err('NPS ID "{}" for "{}" is not unique!', id, displayName)
	NPS_ID_Set.add(id)
	# Keep only units in the target state, or multi-state units that
	# explicitly list the target state.
	if state != G.state and G.state not in NPS_MultiState.get(id, ()):
		return None
	p = {'name': displayName, 'code': code, 'id': id}
	if w is not None:
		p['W'] = w
	return p
UC_Names = (
'Sweeney Granite Mountains Desert Research Center',
)
UC_LongNames = {
'Sweeney Granite Mountains Desert Research Center':
('Sweeney Granite Mountains', 'Desert Research Center'),
}
UC_Campuses = (
'UC Riverside',
)
UC_Counties = (
'San Bernardino County',
)
@staticmethod
def stripPropertiesUC(o):
	#
	# Reduce a UC reserve feature's properties to {campus, name[, name2]}.
	#
	name, campus, county = o['Name'], o['Campus'], o['County']
	assert name in UC_Names
	assert campus in UC_Campuses
	assert county in UC_Counties
	props = {'campus': campus}
	longName = UC_LongNames.get(name)
	if longName is None:
		props['name'] = name
	else:
		props['name'], props['name2'] = longName
	return props
class NPS(object):
	#
	# Boundaries for land managed by the National Park Service
	#
	# Spec class selected by parseArgs(): names the input file, the property
	# key used to recognize feature-property objects, and the strip /
	# postprocess hooks applied while re-serializing the GeoJSON.
	#
	filename = 'nps_boundary'
	nameField = 'UNIT_NAME'
	stripGeometry = stripPolygon
	stripProperties = stripPropertiesNPS
	postprocess = postprocessNPS
class UC_GRANITES(object):
	#
	# Boundary for the Sweeney Granite Mountains Desert Research Center
	#
	# Spec class selected by parseArgs() for mode 'uc/granites'.
	#
	filename = 'uc/Sweeney_Granite_Mountains_Boundary'
	nameField = 'Campus'
	stripGeometry = stripPolygon
	stripProperties = stripPropertiesUC
	postprocess = mergePolygons
def stripHook(o):
	#
	# json.load object_hook: called bottom-up for every decoded JSON object.
	# Depending on the object's keys, strip its geometry, its properties, or
	# its feature/collection wrapper; return None to drop the object.
	#
	if 'coordinates' in o:
		return G.stripGeometry(o['type'], o['coordinates'])
	if G.nameField in o:
		return G.stripProperties(o)
	if 'properties' in o:
		oType = o['type']
		if oType == 'Feature':
			oProperties = o['properties']
			if oProperties is None:
				# Properties were stripped to None => drop the whole feature.
				return None
			return {'type': oType, 'properties': oProperties, 'geometry': o['geometry']}
		if oType == 'name':
			return None
		sys.exit('Unexpected object with properties has type "{}"'.format(oType))
	if 'features' in o:
		oType = o['type']
		if oType == 'FeatureCollection':
			return {'type': oType, 'features': [f for f in o['features'] if f is not None]}
		sys.exit('Unexpected object with features has type "{}"'.format(oType))
	if 'name' in o:
		return None
	# o.keys() instead of the Python-2-only o.iterkeys(): identical behavior
	# on Python 2, and the script no longer crashes here on Python 3.
	sys.exit('Unexpected object has the following keys: {}'.format(', '.join(
		['"{}"'.format(k) for k in o.keys()])))
def strip():
	#
	# Load the G.filename GeoJSON (xy or ll flavor depending on --xy2ll),
	# strip it via stripHook, run G.postprocess, and write the compact
	# result to stdout.
	#
	filename = 'data/{}_{}.json'.format(G.filename, 'xy' if XY2LL else 'll')
	log('Opening {}', filename)
	# Context manager guarantees the file is closed even if parsing fails
	# (the original left the handle open on a json.load error).
	with open(filename) as jsonFile:
		data = json.load(jsonFile, object_hook=stripHook)
	G.postprocess(data)
	json.dump(data, sys.stdout, separators=(',', ':'))
def parseArgs():
	# Parse command-line arguments, select the spec class G for the requested
	# mode, and optionally install the XY2LL inverse-projection function.
	global G, XY2LL
	global PolygonsToRemove
	modeMap = {
		'uc/granites': UC_GRANITES,
	}
	# Every supported state shares the NPS spec class.
	for state in ('ak', 'az', 'ca', 'co', 'hi', 'id', 'mt', 'nm', 'nv', 'or', 'ut', 'wa', 'wy'):
		modeMap['nps/' + state] = NPS
	import argparse
	parser = argparse.ArgumentParser()
	parser.add_argument('mode', choices=sorted(modeMap.keys()))
	parser.add_argument('--xy2ll', choices=('ca', 'sca'))
	args = parser.parse_args()
	G = modeMap[args.mode]
	G.id = args.mode.replace('/', '_')
	if G is NPS:
		# e.g. mode 'nps/ca' -> state 'CA'
		G.state = args.mode[-2:].upper()
	PolygonsToRemove = PolygonsToRemove.get(G.id, ())
	if args.xy2ll:
		import geo
		if args.xy2ll == 'ca':
			XY2LL = geo.CaliforniaAlbers().inverse
		elif args.xy2ll == 'sca':
			# Albers sphere for Southern California (GRS 1980 semi-major axis).
			R = geo.GRS_1980_Ellipsoid.a
			XY2LL = geo.AlbersSphere(-120.0, 0.0, 34.0, 40.5, R).setFalseNorthing(-4000000.0).inverse
if __name__ == '__main__':
parseArgs()
strip()
| nightjuggler/peaks | format_gis_data.py | Python | mit | 27,823 |
import numpy as np
from mushroom_rl.algorithms.policy_search.policy_gradient import PolicyGradient
class GPOMDP(PolicyGradient):
    """
    GPOMDP algorithm.
    "Infinite-Horizon Policy-Gradient Estimation". Baxter J. and Bartlett P. L..
    2001.

    """
    def __init__(self, mdp_info, policy, optimizer, features=None):
        """
        Constructor.

        Args:
            mdp_info: information about the MDP;
            policy: the differentiable policy whose weights are optimized;
            optimizer: the parameter optimizer;
            features: optional state features (default None).

        """
        super().__init__(mdp_info, policy, optimizer, features)

        # Running sum of grad-log-policy over the current episode.
        self.sum_d_log_pi = None
        # Per-step snapshots of sum_d_log_pi for the current episode.
        self.list_sum_d_log_pi = list()
        # One entry per completed episode: its list of per-step snapshots.
        self.list_sum_d_log_pi_ep = list()
        # Per-step discounted rewards of the current episode.
        self.list_reward = list()
        # One entry per completed episode: its list of discounted rewards.
        self.list_reward_ep = list()
        # Per-time-step accumulators (across episodes) for the baseline:
        # numerator holds reward * g^2, denominator holds g^2, where g is the
        # per-step sum of grad-log-policy.
        self.baseline_num = list()
        self.baseline_den = list()
        # Index of the current step within the current episode.
        self.step_count = 0

        self._add_save_attr(
            sum_d_log_pi='numpy',
            list_sum_d_log_pi='pickle',
            list_sum_d_log_pi_ep='pickle',
            list_reward='pickle',
            list_reward_ep='pickle',
            baseline_num='pickle',
            baseline_den='pickle',
            step_count='numpy'
        )

        # Ignore divide by zero.
        # NOTE(review): np.seterr changes numpy's process-wide error state,
        # not just this instance's computations.
        np.seterr(divide='ignore', invalid='ignore')

    def _compute_gradient(self, J):
        """
        Compute the GPOMDP gradient estimate from the stored episodes and
        reset the per-batch episode buffers.

        Args:
            J: average returns of the episodes (unused here; the per-step
               rewards recorded during the episodes are used instead).

        Returns:
            The gradient estimate averaged over episodes.

        """
        n_episodes = len(self.list_sum_d_log_pi_ep)
        grad_J_episode = list()
        for i in range(n_episodes):
            list_sum_d_log_pi = self.list_sum_d_log_pi_ep[i]
            list_reward = self.list_reward_ep[i]
            n_steps = len(list_sum_d_log_pi)
            gradient = np.zeros(self.policy.weights_size)
            for t in range(n_steps):
                step_grad = list_sum_d_log_pi[t]
                step_reward = list_reward[t]
                # Per-step baseline: ratio of the across-episode means of
                # reward * g^2 and g^2 (see _step_update).
                baseline = np.mean(self.baseline_num[t], axis=0) / np.mean(self.baseline_den[t], axis=0)
                # Replace NaN/inf entries (0/0 when there is no gradient
                # signal at this step) with zero.
                baseline[np.logical_not(np.isfinite(baseline))] = 0.
                gradient += step_grad * (step_reward - baseline)
            grad_J_episode.append(gradient)
        gradJ = np.mean(grad_J_episode, axis=0)

        # Reset the per-batch buffers for the next fit.
        self.list_reward_ep = list()
        self.list_sum_d_log_pi_ep = list()
        self.baseline_num = list()
        self.baseline_den = list()

        return gradJ

    def _step_update(self, x, u, r):
        """
        Record one step: accumulate grad-log-policy, the discounted reward,
        and the baseline statistics for the current time step.
        """
        # self.df is presumably the running discount factor maintained by the
        # PolicyGradient parent class -- confirm there.
        discounted_reward = self.df * r
        self.list_reward.append(discounted_reward)

        d_log_pi = self.policy.diff_log(x, u)
        self.sum_d_log_pi += d_log_pi

        self.list_sum_d_log_pi.append(self.sum_d_log_pi.copy())

        squared_sum_d_log_pi = np.square(self.sum_d_log_pi)
        # Grow the per-time-step accumulators the first time step t is seen.
        if self.step_count >= len(self.baseline_num):
            self.baseline_num.append(list())
            self.baseline_den.append(list())
        self.baseline_num[self.step_count].append(discounted_reward * squared_sum_d_log_pi)
        self.baseline_den[self.step_count].append(squared_sum_d_log_pi)
        self.step_count += 1

    def _episode_end_update(self):
        """Archive the finished episode's buffers and clear the running ones."""
        self.list_reward_ep.append(self.list_reward)
        self.list_reward = list()

        self.list_sum_d_log_pi_ep.append(self.list_sum_d_log_pi)
        self.list_sum_d_log_pi = list()

    def _init_update(self):
        """Reset the per-episode accumulators before a new episode starts."""
        self.sum_d_log_pi = np.zeros(self.policy.weights_size)
        self.list_sum_d_log_pi = list()
        self.step_count = 0
| carloderamo/mushroom | mushroom_rl/algorithms/policy_search/policy_gradient/gpomdp.py | Python | mit | 3,178 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
XML = """<?xml version='1.0' encoding='UTF-8'?>
<descriptor xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.rpath.com/permanent/descriptor-1.0.xsd" xsi:schemaLocation="http://www.rpath.com/permanent/descriptor-1.0.xsd descriptor-1.0.xsd">
<metadata>
<displayName>Mountable Filesystem Image Configuration</displayName>
<descriptions>
<desc>Mountable Filesystem Image Configuration</desc>
</descriptions>
</metadata>
<dataFields>
<field>
<name>displayName</name>
<descriptions>
<desc>Image name</desc>
</descriptions>
<help href="@Help_image_name@"/>
<type>str</type>
<required>true</required>
</field>
<field>
<name>options.baseFileName</name>
<descriptions>
<desc>Image filename</desc>
</descriptions>
<help href="@Help_image_filename@"/>
<type>str</type>
<required>false</required>
</field>
<field>
<name>options.installLabelPath</name>
<descriptions>
<desc>Conary installLabelPath</desc>
</descriptions>
<help href="@Help_conary_installlabelpath@"/>
<type>str</type>
<required>false</required>
</field>
<field>
<name>options.freespace</name>
<descriptions>
<desc>Free space</desc>
</descriptions>
<help href="@Help_image_freespace@"/>
<type>int</type>
<default>256</default>
<constraints>
<range>
<min>16</min>
</range>
</constraints>
<required>false</required>
</field>
<field>
<name>options.swapSize</name>
<descriptions>
<desc>Swap space</desc>
</descriptions>
<help href="@Help_image_swapspace@"/>
<type>int</type>
<default>512</default>
<constraints>
<range>
<min>16</min>
</range>
</constraints>
<required>false</required>
</field>
<field>
<name>options.autoResolve</name>
<descriptions>
<desc>Autoinstall Dependencies</desc>
</descriptions>
<help href="@Help_resolve_dependencies@"/>
<type>bool</type>
<default>false</default>
<required>false</required>
</field>
<field>
<name>options.buildOVF10</name>
<descriptions>
<desc>Generate in OVF 1.0?</desc>
</descriptions>
<help href="@Help_build_ovf_1_0@"/>
<type>bool</type>
<default>false</default>
<required>false</required>
</field>
</dataFields>
</descriptor>"""
| sassoftware/mint | mint/django_rest/rbuilder/platforms/image_type_descriptors/rawFsImage.py | Python | apache-2.0 | 3,614 |
#!/usr/bin/env python
from threading import Thread
import docker
import json
import logging
logging.basicConfig(level=logging.ERROR,
                    format='[%(levelname)s] (%(threadName)-10s) %(message)s',
                    )
# Module-level Docker client shared by all helpers below.
# (The original's bare `global client` statement at module scope was a no-op
# -- `global` only has meaning inside a function body -- so it was removed.)
client = docker.from_env()
# /etc/hosts 127.0.0.1 localhost
class ContainerMetadata:
    """Snapshot of the container attributes needed to build /etc/hosts entries."""

    def __init__(self, id, short_id, name, image_name, version, ip):
        self.id = id                  # full container ID
        self.short_id = short_id      # abbreviated container ID
        self.name = name              # container name
        self.image_name = image_name  # image name without registry prefix/tag
        self.version = version        # image tag
        self.ip = ip                  # container IP address

    def __repr__(self):
        # Added for debuggability; the original class had no readable repr.
        return ('ContainerMetadata(id={0!r}, short_id={1!r}, name={2!r}, '
                'image_name={3!r}, version={4!r}, ip={5!r})'.format(
                    self.id, self.short_id, self.name,
                    self.image_name, self.version, self.ip))
def print_containers(containers):
    """Render /etc/hosts lines for every known container.

    ``containers`` maps an image name to a list of ContainerMetadata. When an
    image has several containers, each alias gets a 1-based numeric suffix
    (e.g. web1, web2); a single container keeps the bare image name.
    Returns the concatenated host lines, each terminated by a newline.
    """
    lines = []
    for image_name, metadata_list in containers.items():
        multiple = len(metadata_list) > 1
        for number, metadata in enumerate(metadata_list, start=1):
            alias = metadata.image_name
            if multiple:
                alias += str(number)
            lines.append('{0} {1} {2} {3}\n'.format(
                metadata.ip, alias, metadata.name, metadata.short_id))
    # join() avoids the quadratic cost of building the result with `+=`.
    return ''.join(lines)
def write_hosts(containers):
    """Rewrite the host's /etc/hosts: localhost entry plus one line per container."""
    # Context manager guarantees the handle is closed (and buffers flushed)
    # even if a write fails; the original only closed on the happy path.
    with open("/etc/hosts", "w") as hosts_file:
        hosts_file.write("127.0.0.1 localhost\n")
        hosts_file.write(print_containers(containers))
def add_new_container(containers, event):
    # Handle a Docker 'start' event: look up the container, record its
    # metadata under its image name, and return the new ContainerMetadata.
    event_json = json.loads(event)
    container = client.containers.get(event_json['id'])
    print('Starting container ' + event_json['id'])
    # Assumes the first image tag has the form 'jorgeacf/<name>:<version>'.
    image_name = container.image.tags[0].replace('jorgeacf/', '')
    name = image_name.split(':')[0]
    version = image_name.split(':')[1]
    ip = container.attrs['NetworkSettings']['IPAddress']
    new_container = ContainerMetadata(container.id, container.short_id, container.name, name, version, ip)
    if name in containers:
        containers[name] = containers[name] + [new_container]
    else:
        containers[name] = [new_container]
    return new_container
def remove_container(containers, event):
    """Handle a Docker 'kill' event: drop the container's metadata.

    Removes the entry whose ID matches the event from the list kept under the
    container's image name in ``containers``.
    """
    event_json = json.loads(event)
    container = client.containers.get(event_json['id'])
    # Typo fixed in the log message ('Stoping' -> 'Stopping').
    print('Stopping container ' + event_json['id'])

    # Assumes the first image tag has the form 'jorgeacf/<name>:<version>'.
    image_name = container.image.tags[0].replace('jorgeacf/', '')
    name = image_name.split(':')[0]

    if name in containers:
        containers[name] = [c for c in containers[name] if c.id != event_json['id']]
def list_containers(containers):
    # Seed `containers` with metadata for all currently-running containers
    # whose first image tag contains 'jorgeacf'.
    for container in client.containers.list():
        if 'jorgeacf' in container.image.tags[0]:
            image_name = container.image.tags[0].replace('jorgeacf/', '')
            name = image_name.split(':')[0]
            version = image_name.split(':')[1]
            ip = container.attrs['NetworkSettings']['IPAddress']
            print('List - Name: ' + name)
            new_container = ContainerMetadata(container.id, container.short_id, container.name, name, version, ip)
            if name in containers:
                containers[name] = containers[name] + [new_container]
            else:
                containers[name] = [new_container]
def update_containers_hosts(containers):
    # Push freshly-rendered hosts lines into every tracked container by
    # overwriting its /etc/hosts via `sh -c`.
    # NOTE(review): print_containers(containers) is loop-invariant and could
    # be computed once before the loops.
    for container_type in containers:
        for container_name in containers[container_type]:
            container = client.containers.get(container_name.id)
            hosts = print_containers(containers)
            command = 'echo "### BEGIN DOCKER CONTAINER HOSTS\n' + hosts + '### END DOCKER CONTAINER HOSTS" > /etc/hosts'
            container.exec_run(['sh', '-c', command])
def main():
    # Event loop: seed the currently-running containers, then react to Docker
    # 'start'/'kill' events by updating the tracked metadata, each container's
    # /etc/hosts, and the host's /etc/hosts.
    print('Main start...')
    containers = {}
    list_containers(containers)
    for event in client.events():
        event_json = json.loads(event)
        if 'status' in event_json:
            if 'start' == event_json['status']:
                add_new_container(containers, event)
                update_containers_hosts(containers)
                write_hosts(containers)
            if 'kill' == event_json['status']:
                remove_container(containers, event)
                write_hosts(containers)
    print('Main end...')
if __name__== "__main__":
main()
| jorgeacf/dockerfiles | ci/portainer/scripts/docker-events.py | Python | gpl-3.0 | 3,868 |
import os
from queue import Queue
from bears.python.requirements.PySafetyBear import PySafetyBear
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
from coalib.testing.LocalBearTestHelper import LocalBearTestHelper
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.testing.BearTestHelper import generate_skip_decorator
def get_testfile_path(name):
    """Return the path of *name* inside the PySafety_test_files directory."""
    test_dir = os.path.dirname(__file__)
    return os.path.join(test_dir, 'PySafety_test_files', name)
def load_testfile(name):
    """Return the lines of the named fixture file.

    Uses a context manager so the file handle is closed promptly; the
    original ``open(...).readlines()`` leaked the handle.
    """
    with open(get_testfile_path(name)) as testfile:
        return testfile.readlines()
@generate_skip_decorator(PySafetyBear)
class PySafetyBearTest(LocalBearTestHelper):
    """Tests for PySafetyBear against pinned requirement lines.

    Each test feeds requirement-file contents to the bear and checks the
    vulnerability Results it yields (or that none are yielded).
    """

    def setUp(self):
        # Fresh section and bear instance for every test.
        self.section = Section('name')
        self.uut = PySafetyBear(self.section, Queue())

    def test_without_vulnerability(self):
        # A pinned release with no known advisory yields no results.
        self.check_validity(self.uut, ['lxml==3.6.0'])

    def test_with_vulnerability(self):
        self.check_invalidity(self.uut, ['bottle==0.10.1'])

    def test_with_cve_vulnerability(self):
        # First fixture line pins a bottle release that matches two CVEs.
        file_name = 'requirement.txt'
        file_contents = load_testfile(file_name)
        file_contents = [file_contents[0]]
        self.check_results(
            self.uut,
            file_contents,
            [Result.from_values('PySafetyBear',
                                'bottle<0.12.10 is vulnerable '
                                'to CVE-2016-9964 and your project '
                                'is using 0.10.0.',
                                file=get_testfile_path(file_name),
                                line=1,
                                column=9,
                                end_line=1,
                                end_column=15,
                                severity=RESULT_SEVERITY.NORMAL,
                                additional_info='redirect() in bottle.py '
                                'in bottle 0.12.10 doesn\'t filter '
                                'a "\\r\\n" sequence, which leads '
                                'to a CRLF attack, as demonstrated '
                                'by a redirect("233\\r\\nSet-Cookie: '
                                'name=salt") call.'),
             Result.from_values('PySafetyBear',
                                'bottle>=0.10,<0.10.12 is vulnerable to '
                                'CVE-2014-3137 and your project is '
                                'using 0.10.0.',
                                file=get_testfile_path(file_name),
                                line=1,
                                column=9,
                                end_line=1,
                                end_column=15,
                                severity=RESULT_SEVERITY.NORMAL,
                                additional_info='Bottle 0.10.x before 0.10.12,'
                                ' 0.11.x before 0.11.7, and 0.12.x before'
                                ' 0.12.6 does not properly limit content'
                                ' types, which allows remote attackers to'
                                ' bypass intended access restrictions via an'
                                ' accepted Content-Type followed by a ;'
                                ' (semi-colon) and a Content-Type that'
                                ' would not be accepted, as demonstrated in'
                                ' YouCompleteMe to execute arbitrary code.')],
            filename=get_testfile_path(file_name))

    def test_without_cve_vulnerability(self):
        # Second fixture line matches a pyup.io advisory without a CVE ID.
        file_name = 'requirement.txt'
        file_contents = load_testfile(file_name)
        file_contents = [file_contents[1]]
        self.check_results(
            self.uut,
            file_contents,
            [Result.from_values('PySafetyBear',
                                'locustio<0.7 is vulnerable to pyup.io-25878 '
                                'and your project is using 0.5.1.',
                                file=get_testfile_path(file_name),
                                line=1,
                                column=11,
                                end_line=1,
                                end_column=16,
                                severity=RESULT_SEVERITY.NORMAL,
                                additional_info='locustio before '
                                '0.7 uses pickle.',
                                )],
            filename=get_testfile_path(file_name))

    def test_with_cve_ignore(self):
        # Ignored CVEs must be filtered out; only the pyup.io advisory remains.
        self.section.append(Setting('cve_ignore', 'CVE-2016-9964, '
                                                  'CVE-2014-3137'))
        file_name = 'requirement.txt'
        file_contents = load_testfile(file_name)
        # file_contents = [file_contents[0]]
        self.check_results(
            self.uut,
            file_contents,
            [Result.from_values('PySafetyBear',
                                'locustio<0.7 is vulnerable to pyup.io-25878 '
                                'and your project is using 0.5.1.',
                                file=get_testfile_path(file_name),
                                line=2,
                                column=11,
                                end_line=2,
                                end_column=16,
                                severity=RESULT_SEVERITY.NORMAL,
                                additional_info='locustio before '
                                '0.7 uses pickle.',
                                )],
            filename=get_testfile_path(file_name))

    def test_with_no_requirements(self):
        # Empty input must not raise.
        self.check_validity(self.uut, [])

    def test_with_no_pinned_requirements(self):
        # Unpinned requirements are ignored.
        self.check_validity(self.uut, ['foo'])
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest2 as unittest
from tinctest.runner import TINCTestResultSet
from tinctest.case import TINCTestCase
class TINCTestSuite(unittest.TestSuite):
    """
    A base TINC test suite that is constructed by the tinc loaders and that
    can be passed to tinc test runners. TINC test loaders and runners understand only
    TINCTestSuite-s, which is an extension of unittest.TestSuite.

    For use, create an instance of TINCTestSuite, then add test case instances - of type TINCTestCase.
    When all tests have been added, the suite can be passed to a test
    runner, such as TINCTestRunner.

    In addition to what unittest does, TINCTestSuite knows how to call custom result objects defined
    for a TINC test case. Each TINCTestCase can define its own result object and expose it through its
    defaultTestResult method, which will be used by TINCTestSuite to call the specific result object APIs like
    addSuccess, addFailure etc. configured in its custom result object.
    """
    # NOTE: the original class body defined flatten() twice with identical
    # bodies; the second definition silently rebound the first and has been
    # removed.

    def __init__(self, tests=(), dryrun=False):
        """
        Args:
            tests: an iterable of initial tests, as for unittest.TestSuite.
            dryrun: when True, run() only reports load failures and skips
                instead of executing tests (see _dryrun).
        """
        super(TINCTestSuite, self).__init__(tests)
        # Easier to add this as an instance variable than providing
        # an argument to TestSuite's run method. The latter requires
        # us to override the runner's run method as well which is what
        # calls test suite run.
        self.dryrun = dryrun

    def __str__(self):
        return "{dryrun=%s: %s}" %(str(self.dryrun), super(TINCTestSuite, self).__str__())

    def flatten(self, flat_test_suite):
        """
        Flatten self into flat_test_suite: every nested TINCTestSuite is
        recursively expanded and the individual tests are appended to
        flat_test_suite's _tests.
        """
        for test in self._tests:
            if isinstance(test, TINCTestSuite):
                test.flatten(flat_test_suite)
            else:
                flat_test_suite._tests.append(test)

    def _wrapped_run(self, result, debug=False):
        """
        Run the suite, routing each TINCTestCase through its own custom
        result object (defaultTestResult) and folding the outcome back into
        the aggregate result.
        """
        if self.dryrun:
            self._dryrun(result)
            return
        for test in self:
            if result.shouldStop:
                break
            if unittest.suite._isnotsuite(test):
                run_result = test.defaultTestResult()
                # Standard unittest class/module fixture handling.
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__
                if (getattr(test.__class__, '_classSetupFailed', False) or
                    getattr(result, '_moduleSetUpFailed', False)):
                    continue
                # Give each TINC test its own custom result object when the
                # aggregate result is a TINCTestResultSet.
                if issubclass(result.__class__, TINCTestResultSet) and \
                   issubclass(test.__class__, TINCTestCase):
                    run_result = test.defaultTestResult(result.stream,
                                                        result.descriptions,
                                                        result.verbosity)
            else:
                if issubclass(result.__class__, TINCTestResultSet):
                    run_result = TINCTestResultSet(result.stream,
                                                   result.descriptions,
                                                   result.verbosity)
            if hasattr(test, '_wrapped_run'):
                # Nested suite: recurse, then merge its result set.
                test._wrapped_run(result, debug)
                if issubclass(result.__class__, TINCTestResultSet):
                    result.addResult(run_result)
                else:
                    result.testsRun += run_result.testsRun
                    if run_result.failures:
                        result.failures.extend(run_result.failures)
                    if run_result.errors:
                        result.errors.extend(run_result.errors)
                    if run_result.skipped:
                        result.skipped.extend(run_result.skipped)
            elif not debug:
                # Individual test: run against its own result, then merge.
                test(run_result)
                if issubclass(result.__class__, TINCTestResultSet):
                    result.addResult(run_result)
                else:
                    result.testsRun += run_result.testsRun
                    if run_result.failures:
                        result.failures.extend(run_result.failures)
                    if run_result.errors:
                        result.errors.extend(run_result.errors)
                    if run_result.skipped:
                        result.skipped.extend(run_result.skipped)
            else:
                test.debug()

    def _dryrun(self, result):
        """
        Runs the given test or test suite in dryrun mode. No tests will be run, just import and load failures will be reported
        along with tests that are skipped.
        """
        for test in self:
            if hasattr(test, '_dryrun'):
                test._dryrun(result)
            else:
                # If it is an actual test and if the test does not have any skip metadata associated with it, run the test,
                # This will just be tests with module import failures and test case load failures.
                if test.__class__.__name__ == 'TINCTestCaseLoadFailure' or test.__class__.__name__ == 'TINCModuleImportFailure':
                    test(result)
                elif issubclass(test.__class__, TINCTestCase):
                    result.startTest(test)
                    if test.skip:
                        result.addSkip(test, test.skip)
                    else:
                        result.addSuccess(test)
                    result.stopTest(test)
                elif test.__class__.__name__ == 'ModuleImportFailure':
                    test(result)
| edespino/gpdb | src/test/tinc/tinctest/suite.py | Python | apache-2.0 | 6,748 |
# Generated by Django 1.11.9 on 2018-02-01 15:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Make DomainLink.last_pull nullable.
    # NOTE(review): the migration file name says 'migrate_linked_apps', but
    # the only operation here alters the last_pull field -- confirm intent.

    initial = True

    dependencies = [
        ('linked_domain', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='domainlink',
            name='last_pull',
            field=models.DateTimeField(null=True),
        ),
    ]
| dimagi/commcare-hq | corehq/apps/linked_domain/migrations/0002_migrate_linked_apps.py | Python | bsd-3-clause | 409 |
# Used to configure CxxTest
import csnBuild

# Declare the CxxTest third-party project as a 'library' so CSnake-based
# builds can depend on it.
cxxTest = csnBuild.Project("CxxTest", "library")
| msteghofer/CSnake | tests/data/thirdParty/CxxTest/csnCxxTest.py | Python | bsd-3-clause | 94 |
from django import forms
from fields import Html5CaptchaField
from html5input import Html5EmailInput
class ContactForm(forms.Form):
    """Contact form: sender e-mail, message body, and a CAPTCHA field.

    The widgets receive ``attrs={'required': None}`` -- presumably to render
    the bare HTML5 ``required`` attribute for client-side validation; confirm
    against the Html5EmailInput widget implementation.
    """
    email = forms.EmailField(required=True, widget=Html5EmailInput(attrs={'required': None}))
    message = forms.CharField(widget=forms.Textarea(attrs={'required': None}))
    captcha = Html5CaptchaField(required=True)
| upTee/upTee | uptee/messaging/forms.py | Python | bsd-3-clause | 354 |
import builtins
import gc
import sys
import types
import math
import unittest
import weakref
from copy import deepcopy
from test import support
class OperatorsTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.binops = {
'add': '+',
'sub': '-',
'mul': '*',
'div': '/',
'divmod': 'divmod',
'pow': '**',
'lshift': '<<',
'rshift': '>>',
'and': '&',
'xor': '^',
'or': '|',
'cmp': 'cmp',
'lt': '<',
'le': '<=',
'eq': '==',
'ne': '!=',
'gt': '>',
'ge': '>=',
}
for name, expr in list(self.binops.items()):
if expr.islower():
expr = expr + "(a, b)"
else:
expr = 'a %s b' % expr
self.binops[name] = expr
self.unops = {
'pos': '+',
'neg': '-',
'abs': 'abs',
'invert': '~',
'int': 'int',
'float': 'float',
'oct': 'oct',
'hex': 'hex',
}
for name, expr in list(self.unops.items()):
if expr.islower():
expr = expr + "(a)"
else:
expr = '%s a' % expr
self.unops[name] = expr
    def unop_test(self, a, res, expr="len(a)", meth="__len__"):
        """Check a unary operation three ways: eval of *expr*, the unbound
        method looked up on the type, and the bound method on *a*."""
        d = {'a': a}
        self.assertEqual(eval(expr, d), res)
        t = type(a)
        m = getattr(t, meth)

        # Find method in parent class
        while meth not in t.__dict__:
            t = t.__bases__[0]
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        self.assertEqual(m(a), res)
        bm = getattr(a, meth)
        self.assertEqual(bm(), res)
    def binop_test(self, a, b, res, expr="a+b", meth="__add__"):
        """Check a binary operation via eval of *expr*, the unbound method on
        the type, and the bound method on *a*."""
        d = {'a': a, 'b': b}

        # XXX Hack so this passes before 2.3 when -Qnew is specified.
        if meth == "__div__" and 1/2 == 0.5:
            meth = "__truediv__"

        if meth == '__divmod__': pass

        self.assertEqual(eval(expr, d), res)
        t = type(a)
        m = getattr(t, meth)
        # Walk up to the class that actually defines the method.
        while meth not in t.__dict__:
            t = t.__bases__[0]
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        self.assertEqual(m(a, b), res)
        bm = getattr(a, meth)
        self.assertEqual(bm(b), res)
    def sliceop_test(self, a, b, c, res, expr="a[b:c]", meth="__getitem__"):
        """Check a slicing operation via eval of *expr*, the unbound method
        on the type (called with an explicit slice), and the bound method."""
        d = {'a': a, 'b': b, 'c': c}
        self.assertEqual(eval(expr, d), res)
        t = type(a)
        m = getattr(t, meth)
        # Walk up to the class that actually defines the method.
        while meth not in t.__dict__:
            t = t.__bases__[0]
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        self.assertEqual(m(a, slice(b, c)), res)
        bm = getattr(a, meth)
        self.assertEqual(bm(slice(b, c)), res)
    def setop_test(self, a, b, res, stmt="a+=b", meth="__iadd__"):
        """Check an in-place (augmented-assignment) operation via exec of
        *stmt*, the unbound method on the type, and the bound method.
        *a* is deep-copied before each variant so the three runs don't
        observe each other's mutations."""
        d = {'a': deepcopy(a), 'b': b}
        exec(stmt, d)
        self.assertEqual(d['a'], res)
        t = type(a)
        m = getattr(t, meth)
        # Walk up to the class that actually defines the method.
        while meth not in t.__dict__:
            t = t.__bases__[0]
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        d['a'] = deepcopy(a)
        m(d['a'], b)
        self.assertEqual(d['a'], res)
        d['a'] = deepcopy(a)
        bm = getattr(d['a'], meth)
        bm(b)
        self.assertEqual(d['a'], res)
    def set2op_test(self, a, b, c, res, stmt="a[b]=c", meth="__setitem__"):
        """Check a two-argument mutating operation (e.g. item assignment) via
        exec of *stmt*, the unbound method, and the bound method, deep-copying
        *a* before each variant."""
        d = {'a': deepcopy(a), 'b': b, 'c': c}
        exec(stmt, d)
        self.assertEqual(d['a'], res)
        t = type(a)
        m = getattr(t, meth)
        # Walk up to the class that actually defines the method.
        while meth not in t.__dict__:
            t = t.__bases__[0]
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        d['a'] = deepcopy(a)
        m(d['a'], b, c)
        self.assertEqual(d['a'], res)
        d['a'] = deepcopy(a)
        bm = getattr(d['a'], meth)
        bm(b, c)
        self.assertEqual(d['a'], res)
    def setsliceop_test(self, a, b, c, d, res, stmt="a[b:c]=d", meth="__setitem__"):
        """Check slice assignment via exec of *stmt*, the unbound method on
        the type (with an explicit slice), and the bound method, deep-copying
        *a* before each variant."""
        dictionary = {'a': deepcopy(a), 'b': b, 'c': c, 'd': d}
        exec(stmt, dictionary)
        self.assertEqual(dictionary['a'], res)
        t = type(a)
        # Walk up to the class that actually defines the method.
        while meth not in t.__dict__:
            t = t.__bases__[0]
        m = getattr(t, meth)
        # in some implementations (e.g. PyPy), 'm' can be a regular unbound
        # method object; the getattr() below obtains its underlying function.
        self.assertEqual(getattr(m, 'im_func', m), t.__dict__[meth])
        dictionary['a'] = deepcopy(a)
        m(dictionary['a'], slice(b, c), d)
        self.assertEqual(dictionary['a'], res)
        dictionary['a'] = deepcopy(a)
        bm = getattr(dictionary['a'], meth)
        bm(slice(b, c), d)
        self.assertEqual(dictionary['a'], res)
def test_lists(self):
# Testing list operations...
# Asserts are within individual test methods
self.binop_test([1], [2], [1,2], "a+b", "__add__")
self.binop_test([1,2,3], 2, 1, "b in a", "__contains__")
self.binop_test([1,2,3], 4, 0, "b in a", "__contains__")
self.binop_test([1,2,3], 1, 2, "a[b]", "__getitem__")
self.sliceop_test([1,2,3], 0, 2, [1,2], "a[b:c]", "__getitem__")
self.setop_test([1], [2], [1,2], "a+=b", "__iadd__")
self.setop_test([1,2], 3, [1,2,1,2,1,2], "a*=b", "__imul__")
self.unop_test([1,2,3], 3, "len(a)", "__len__")
self.binop_test([1,2], 3, [1,2,1,2,1,2], "a*b", "__mul__")
self.binop_test([1,2], 3, [1,2,1,2,1,2], "b*a", "__rmul__")
self.set2op_test([1,2], 1, 3, [1,3], "a[b]=c", "__setitem__")
self.setsliceop_test([1,2,3,4], 1, 3, [5,6], [1,5,6,4], "a[b:c]=d",
"__setitem__")
def test_dicts(self):
# Testing dict operations...
self.binop_test({1:2,3:4}, 1, 1, "b in a", "__contains__")
self.binop_test({1:2,3:4}, 2, 0, "b in a", "__contains__")
self.binop_test({1:2,3:4}, 1, 2, "a[b]", "__getitem__")
d = {1:2, 3:4}
l1 = []
for i in list(d.keys()):
l1.append(i)
l = []
for i in iter(d):
l.append(i)
self.assertEqual(l, l1)
l = []
for i in d.__iter__():
l.append(i)
self.assertEqual(l, l1)
l = []
for i in dict.__iter__(d):
l.append(i)
self.assertEqual(l, l1)
d = {1:2, 3:4}
self.unop_test(d, 2, "len(a)", "__len__")
self.assertEqual(eval(repr(d), {}), d)
self.assertEqual(eval(d.__repr__(), {}), d)
self.set2op_test({1:2,3:4}, 2, 3, {1:2,2:3,3:4}, "a[b]=c",
"__setitem__")
    # Tests for unary and binary operators
    def number_operators(self, a, b, skip=[]):
        """Run every applicable binary and unary operator test on *a* (and
        *b*), skipping operator names listed in *skip*.

        NOTE(review): the mutable default ``skip=[]`` is harmless here since
        the method only reads it, but it is still a fragile pattern.
        """
        dict = {'a': a, 'b': b}

        for name, expr in list(self.binops.items()):
            if name not in skip:
                name = "__%s__" % name
                if hasattr(a, name):
                    res = eval(expr, dict)
                    self.binop_test(a, b, res, expr, name)

        for name, expr in list(self.unops.items()):
            if name not in skip:
                name = "__%s__" % name
                if hasattr(a, name):
                    res = eval(expr, dict)
                    self.unop_test(a, res, expr, name)
def test_ints(self):
# Testing int operations...
self.number_operators(100, 3)
# The following crashes in Python 2.2
self.assertEqual((1).__bool__(), 1)
self.assertEqual((0).__bool__(), 0)
# This returns 'NotImplemented' in Python 2.2
class C(int):
def __add__(self, other):
return NotImplemented
self.assertEqual(C(5), 5)
try:
C() + ""
except TypeError:
pass
else:
self.fail("NotImplemented should have caused TypeError")
def test_floats(self):
# Testing float operations...
self.number_operators(100.0, 3.0)
def test_complexes(self):
# Testing complex operations...
self.number_operators(100.0j, 3.0j, skip=['lt', 'le', 'gt', 'ge',
'int', 'float',
'divmod', 'mod'])
class Number(complex):
__slots__ = ['prec']
def __new__(cls, *args, **kwds):
result = complex.__new__(cls, *args)
result.prec = kwds.get('prec', 12)
return result
def __repr__(self):
prec = self.prec
if self.imag == 0.0:
return "%.*g" % (prec, self.real)
if self.real == 0.0:
return "%.*gj" % (prec, self.imag)
return "(%.*g+%.*gj)" % (prec, self.real, prec, self.imag)
__str__ = __repr__
a = Number(3.14, prec=6)
self.assertEqual(repr(a), "3.14")
self.assertEqual(a.prec, 6)
a = Number(a, prec=2)
self.assertEqual(repr(a), "3.1")
self.assertEqual(a.prec, 2)
a = Number(234.5)
self.assertEqual(repr(a), "234.5")
self.assertEqual(a.prec, 12)
def test_explicit_reverse_methods(self):
# see issue 9930
self.assertEqual(complex.__radd__(3j, 4.0), complex(4.0, 3.0))
self.assertEqual(float.__rsub__(3.0, 1), -2.0)
@support.impl_detail("the module 'xxsubtype' is internal")
def test_spam_lists(self):
# Testing spamlist operations...
import copy, xxsubtype as spam
def spamlist(l, memo=None):
import xxsubtype as spam
return spam.spamlist(l)
# This is an ugly hack:
copy._deepcopy_dispatch[spam.spamlist] = spamlist
self.binop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+b",
"__add__")
self.binop_test(spamlist([1,2,3]), 2, 1, "b in a", "__contains__")
self.binop_test(spamlist([1,2,3]), 4, 0, "b in a", "__contains__")
self.binop_test(spamlist([1,2,3]), 1, 2, "a[b]", "__getitem__")
self.sliceop_test(spamlist([1,2,3]), 0, 2, spamlist([1,2]), "a[b:c]",
"__getitem__")
self.setop_test(spamlist([1]), spamlist([2]), spamlist([1,2]), "a+=b",
"__iadd__")
self.setop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*=b",
"__imul__")
self.unop_test(spamlist([1,2,3]), 3, "len(a)", "__len__")
self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "a*b",
"__mul__")
self.binop_test(spamlist([1,2]), 3, spamlist([1,2,1,2,1,2]), "b*a",
"__rmul__")
self.set2op_test(spamlist([1,2]), 1, 3, spamlist([1,3]), "a[b]=c",
"__setitem__")
self.setsliceop_test(spamlist([1,2,3,4]), 1, 3, spamlist([5,6]),
spamlist([1,5,6,4]), "a[b:c]=d", "__setitem__")
# Test subclassing
class C(spam.spamlist):
def foo(self): return 1
a = C()
self.assertEqual(a, [])
self.assertEqual(a.foo(), 1)
a.append(100)
self.assertEqual(a, [100])
self.assertEqual(a.getstate(), 0)
a.setstate(42)
self.assertEqual(a.getstate(), 42)
    @support.impl_detail("the module 'xxsubtype' is internal")
    def test_spam_dicts(self):
        # Testing spamdict operations...
        # spamdict is a C-implemented dict subtype from the internal
        # xxsubtype module; this exercises its mapping-protocol slots.
        import copy, xxsubtype as spam
        def spamdict(d, memo=None):
            import xxsubtype as spam
            sd = spam.spamdict()
            for k, v in list(d.items()):
                sd[k] = v
            return sd
        # This is an ugly hack:
        # register a deepcopy handler so copy.deepcopy can reproduce
        # spamdict instances (mutates module-global dispatch state).
        copy._deepcopy_dispatch[spam.spamdict] = spamdict
        self.binop_test(spamdict({1:2,3:4}), 1, 1, "b in a", "__contains__")
        self.binop_test(spamdict({1:2,3:4}), 2, 0, "b in a", "__contains__")
        self.binop_test(spamdict({1:2,3:4}), 1, 2, "a[b]", "__getitem__")
        # All three iteration paths (keys(), iter(), explicit __iter__)
        # must yield the same key sequence.
        d = spamdict({1:2,3:4})
        l1 = []
        for i in list(d.keys()):
            l1.append(i)
        l = []
        for i in iter(d):
            l.append(i)
        self.assertEqual(l, l1)
        l = []
        for i in d.__iter__():
            l.append(i)
        self.assertEqual(l, l1)
        l = []
        for i in type(spamdict({})).__iter__(d):
            l.append(i)
        self.assertEqual(l, l1)
        straightd = {1:2, 3:4}
        spamd = spamdict(straightd)
        self.unop_test(spamd, 2, "len(a)", "__len__")
        self.unop_test(spamd, repr(straightd), "repr(a)", "__repr__")
        self.set2op_test(spamdict({1:2,3:4}), 2, 3, spamdict({1:2,2:3,3:4}),
                         "a[b]=c", "__setitem__")
        # Test subclassing
        # (getstate/setstate come from the C base type)
        class C(spam.spamdict):
            def foo(self): return 1
        a = C()
        self.assertEqual(list(a.items()), [])
        self.assertEqual(a.foo(), 1)
        a['foo'] = 'bar'
        self.assertEqual(list(a.items()), [('foo', 'bar')])
        self.assertEqual(a.getstate(), 0)
        a.setstate(100)
        self.assertEqual(a.getstate(), 100)
class ClassPropertiesAndMethods(unittest.TestCase):
def test_python_dicts(self):
# Testing Python subclass of dict...
self.assertTrue(issubclass(dict, dict))
self.assertIsInstance({}, dict)
d = dict()
self.assertEqual(d, {})
self.assertTrue(d.__class__ is dict)
self.assertIsInstance(d, dict)
class C(dict):
state = -1
def __init__(self_local, *a, **kw):
if a:
self.assertEqual(len(a), 1)
self_local.state = a[0]
if kw:
for k, v in list(kw.items()):
self_local[v] = k
def __getitem__(self, key):
return self.get(key, 0)
def __setitem__(self_local, key, value):
self.assertIsInstance(key, type(0))
dict.__setitem__(self_local, key, value)
def setstate(self, state):
self.state = state
def getstate(self):
return self.state
self.assertTrue(issubclass(C, dict))
a1 = C(12)
self.assertEqual(a1.state, 12)
a2 = C(foo=1, bar=2)
self.assertEqual(a2[1] == 'foo' and a2[2], 'bar')
a = C()
self.assertEqual(a.state, -1)
self.assertEqual(a.getstate(), -1)
a.setstate(0)
self.assertEqual(a.state, 0)
self.assertEqual(a.getstate(), 0)
a.setstate(10)
self.assertEqual(a.state, 10)
self.assertEqual(a.getstate(), 10)
self.assertEqual(a[42], 0)
a[42] = 24
self.assertEqual(a[42], 24)
N = 50
for i in range(N):
a[i] = C()
for j in range(N):
a[i][j] = i*j
for i in range(N):
for j in range(N):
self.assertEqual(a[i][j], i*j)
def test_python_lists(self):
# Testing Python subclass of list...
class C(list):
def __getitem__(self, i):
if isinstance(i, slice):
return i.start, i.stop
return list.__getitem__(self, i) + 100
a = C()
a.extend([0,1,2])
self.assertEqual(a[0], 100)
self.assertEqual(a[1], 101)
self.assertEqual(a[2], 102)
self.assertEqual(a[100:200], (100,200))
    def test_metaclass(self):
        # Testing metaclasses...
        # metaclass=type is the default and must behave like a plain class.
        class C(metaclass=type):
            def __init__(self):
                self.__state = 0
            def getstate(self):
                return self.__state
            def setstate(self, state):
                self.__state = state
        a = C()
        self.assertEqual(a.getstate(), 0)
        a.setstate(10)
        self.assertEqual(a.getstate(), 10)
        # Methods defined on the metaclass are visible on the class only.
        class _metaclass(type):
            def myself(cls): return cls
        class D(metaclass=_metaclass):
            pass
        self.assertEqual(D.myself(), D)
        d = D()
        self.assertEqual(d.__class__, D)
        # A metaclass __new__ may mutate the namespace before type creation.
        class M1(type):
            def __new__(cls, name, bases, dict):
                dict['__spam__'] = 1
                return type.__new__(cls, name, bases, dict)
        class C(metaclass=M1):
            pass
        self.assertEqual(C.__spam__, 1)
        c = C()
        self.assertEqual(c.__spam__, 1)
        # A metaclass need not even produce a type: M2 returns a plain
        # object holding the class parts and builds instances by hand.
        class _instance(object):
            pass
        class M2(object):
            @staticmethod
            def __new__(cls, name, bases, dict):
                self = object.__new__(cls)
                self.name = name
                self.bases = bases
                self.dict = dict
                return self
            def __call__(self):
                it = _instance()
                # Early binding of methods
                for key in self.dict:
                    if key.startswith("__"):
                        continue
                    setattr(it, key, self.dict[key].__get__(it, self))
                return it
        class C(metaclass=M2):
            def spam(self):
                return 42
        self.assertEqual(C.name, 'C')
        self.assertEqual(C.bases, ())
        self.assertIn('spam', C.dict)
        c = C()
        self.assertEqual(c.spam(), 42)
        # More metaclass examples
        class autosuper(type):
            # Automatically add __super to the class
            # This trick only works for dynamic classes
            def __new__(metaclass, name, bases, dict):
                cls = super(autosuper, metaclass).__new__(metaclass,
                                                          name, bases, dict)
                # Name mangling for __super removes leading underscores
                while name[:1] == "_":
                    name = name[1:]
                if name:
                    name = "_%s__super" % name
                else:
                    name = "__super"
                setattr(cls, name, super(cls))
                return cls
        class A(metaclass=autosuper):
            def meth(self):
                return "A"
        class B(A):
            def meth(self):
                return "B" + self.__super.meth()
        class C(A):
            def meth(self):
                return "C" + self.__super.meth()
        class D(C, B):
            def meth(self):
                return "D" + self.__super.meth()
        self.assertEqual(D().meth(), "DCBA")
        class E(B, C):
            def meth(self):
                return "E" + self.__super.meth()
        self.assertEqual(E().meth(), "EBCA")
        class autoproperty(type):
            # Automatically create property attributes when methods
            # named _get_x and/or _set_x are found
            def __new__(metaclass, name, bases, dict):
                hits = {}
                for key, val in dict.items():
                    if key.startswith("_get_"):
                        key = key[5:]
                        get, set = hits.get(key, (None, None))
                        get = val
                        hits[key] = get, set
                    elif key.startswith("_set_"):
                        key = key[5:]
                        get, set = hits.get(key, (None, None))
                        set = val
                        hits[key] = get, set
                for key, (get, set) in hits.items():
                    dict[key] = property(get, set)
                return super(autoproperty, metaclass).__new__(metaclass,
                                                              name, bases, dict)
        class A(metaclass=autoproperty):
            def _get_x(self):
                return -self.__x
            def _set_x(self, x):
                self.__x = -x
        a = A()
        self.assertTrue(not hasattr(a, "x"))
        a.x = 12
        self.assertEqual(a.x, 12)
        self.assertEqual(a._A__x, -12)
        class multimetaclass(autoproperty, autosuper):
            # Merge of multiple cooperating metaclasses
            pass
        class A(metaclass=multimetaclass):
            def _get_x(self):
                return "A"
        class B(A):
            def _get_x(self):
                return "B" + self.__super._get_x()
        class C(A):
            def _get_x(self):
                return "C" + self.__super._get_x()
        class D(C, B):
            def _get_x(self):
                return "D" + self.__super._get_x()
        self.assertEqual(D().x, "DCBA")
        # Make sure type(x) doesn't call x.__class__.__init__
        class T(type):
            counter = 0
            def __init__(self, *args):
                T.counter += 1
        class C(metaclass=T):
            pass
        self.assertEqual(T.counter, 1)
        a = C()
        self.assertEqual(type(a), C)
        self.assertEqual(T.counter, 1)
        class C(object): pass
        c = C()
        try: c()
        except TypeError: pass
        else: self.fail("calling object w/o call method should raise "
                        "TypeError")
        # Testing code to find most derived baseclass
        class A(type):
            def __new__(*args, **kwargs):
                return type.__new__(*args, **kwargs)
        class B(object):
            pass
        class C(object, metaclass=A):
            pass
        # The most derived metaclass of D is A rather than type.
        class D(B, C):
            pass
        self.assertIs(A, type(D))
        # issue1294232: correct metaclass calculation
        new_calls = []  # to check the order of __new__ calls
        class AMeta(type):
            @staticmethod
            def __new__(mcls, name, bases, ns):
                new_calls.append('AMeta')
                return super().__new__(mcls, name, bases, ns)
            @classmethod
            def __prepare__(mcls, name, bases):
                return {}
        class BMeta(AMeta):
            @staticmethod
            def __new__(mcls, name, bases, ns):
                new_calls.append('BMeta')
                return super().__new__(mcls, name, bases, ns)
            @classmethod
            def __prepare__(mcls, name, bases):
                ns = super().__prepare__(name, bases)
                ns['BMeta_was_here'] = True
                return ns
        class A(metaclass=AMeta):
            pass
        self.assertEqual(['AMeta'], new_calls)
        new_calls.clear()
        class B(metaclass=BMeta):
            pass
        # BMeta.__new__ calls AMeta.__new__ with super:
        self.assertEqual(['BMeta', 'AMeta'], new_calls)
        new_calls.clear()
        class C(A, B):
            pass
        # The most derived metaclass is BMeta:
        self.assertEqual(['BMeta', 'AMeta'], new_calls)
        new_calls.clear()
        # BMeta.__prepare__ should've been called:
        self.assertIn('BMeta_was_here', C.__dict__)
        # The order of the bases shouldn't matter:
        class C2(B, A):
            pass
        self.assertEqual(['BMeta', 'AMeta'], new_calls)
        new_calls.clear()
        self.assertIn('BMeta_was_here', C2.__dict__)
        # Check correct metaclass calculation when a metaclass is declared:
        class D(C, metaclass=type):
            pass
        self.assertEqual(['BMeta', 'AMeta'], new_calls)
        new_calls.clear()
        self.assertIn('BMeta_was_here', D.__dict__)
        class E(C, metaclass=AMeta):
            pass
        self.assertEqual(['BMeta', 'AMeta'], new_calls)
        new_calls.clear()
        self.assertIn('BMeta_was_here', E.__dict__)
        # Special case: the given metaclass isn't a class,
        # so there is no metaclass calculation.
        marker = object()
        def func(*args, **kwargs):
            return marker
        class X(metaclass=func):
            pass
        class Y(object, metaclass=func):
            pass
        class Z(D, metaclass=func):
            pass
        self.assertIs(marker, X)
        self.assertIs(marker, Y)
        self.assertIs(marker, Z)
        # The given metaclass is a class,
        # but not a descendant of type.
        prepare_calls = []  # to track __prepare__ calls
        class ANotMeta:
            def __new__(mcls, *args, **kwargs):
                new_calls.append('ANotMeta')
                return super().__new__(mcls)
            @classmethod
            def __prepare__(mcls, name, bases):
                prepare_calls.append('ANotMeta')
                return {}
        class BNotMeta(ANotMeta):
            def __new__(mcls, *args, **kwargs):
                new_calls.append('BNotMeta')
                return super().__new__(mcls)
            @classmethod
            def __prepare__(mcls, name, bases):
                prepare_calls.append('BNotMeta')
                return super().__prepare__(name, bases)
        class A(metaclass=ANotMeta):
            pass
        self.assertIs(ANotMeta, type(A))
        self.assertEqual(['ANotMeta'], prepare_calls)
        prepare_calls.clear()
        self.assertEqual(['ANotMeta'], new_calls)
        new_calls.clear()
        class B(metaclass=BNotMeta):
            pass
        self.assertIs(BNotMeta, type(B))
        self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
        prepare_calls.clear()
        self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
        new_calls.clear()
        class C(A, B):
            pass
        self.assertIs(BNotMeta, type(C))
        self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
        new_calls.clear()
        self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
        prepare_calls.clear()
        class C2(B, A):
            pass
        self.assertIs(BNotMeta, type(C2))
        self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
        new_calls.clear()
        self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
        prepare_calls.clear()
        # This is a TypeError, because of a metaclass conflict:
        # BNotMeta is neither a subclass, nor a superclass of type
        with self.assertRaises(TypeError):
            class D(C, metaclass=type):
                pass
        class E(C, metaclass=ANotMeta):
            pass
        self.assertIs(BNotMeta, type(E))
        self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
        new_calls.clear()
        self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
        prepare_calls.clear()
        # object() instances in the bases list are ignored for the
        # metaclass calculation (they are not types).
        class F(object(), C):
            pass
        self.assertIs(BNotMeta, type(F))
        self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
        new_calls.clear()
        self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
        prepare_calls.clear()
        class F2(C, object()):
            pass
        self.assertIs(BNotMeta, type(F2))
        self.assertEqual(['BNotMeta', 'ANotMeta'], new_calls)
        new_calls.clear()
        self.assertEqual(['BNotMeta', 'ANotMeta'], prepare_calls)
        prepare_calls.clear()
        # TypeError: BNotMeta is neither a
        # subclass, nor a superclass of int
        with self.assertRaises(TypeError):
            class X(C, int()):
                pass
        with self.assertRaises(TypeError):
            class X(int(), C):
                pass
def test_module_subclasses(self):
# Testing Python subclass of module...
log = []
MT = type(sys)
class MM(MT):
def __init__(self, name):
MT.__init__(self, name)
def __getattribute__(self, name):
log.append(("getattr", name))
return MT.__getattribute__(self, name)
def __setattr__(self, name, value):
log.append(("setattr", name, value))
MT.__setattr__(self, name, value)
def __delattr__(self, name):
log.append(("delattr", name))
MT.__delattr__(self, name)
a = MM("a")
a.foo = 12
x = a.foo
del a.foo
self.assertEqual(log, [("setattr", "foo", 12),
("getattr", "foo"),
("delattr", "foo")])
# http://python.org/sf/1174712
try:
class Module(types.ModuleType, str):
pass
except TypeError:
pass
else:
self.fail("inheriting from ModuleType and str at the same time "
"should fail")
def test_multiple_inheritance(self):
# Testing multiple inheritance...
class C(object):
def __init__(self):
self.__state = 0
def getstate(self):
return self.__state
def setstate(self, state):
self.__state = state
a = C()
self.assertEqual(a.getstate(), 0)
a.setstate(10)
self.assertEqual(a.getstate(), 10)
class D(dict, C):
def __init__(self):
type({}).__init__(self)
C.__init__(self)
d = D()
self.assertEqual(list(d.keys()), [])
d["hello"] = "world"
self.assertEqual(list(d.items()), [("hello", "world")])
self.assertEqual(d["hello"], "world")
self.assertEqual(d.getstate(), 0)
d.setstate(10)
self.assertEqual(d.getstate(), 10)
self.assertEqual(D.__mro__, (D, dict, C, object))
# SF bug #442833
class Node(object):
def __int__(self):
return int(self.foo())
def foo(self):
return "23"
class Frag(Node, list):
def foo(self):
return "42"
self.assertEqual(Node().__int__(), 23)
self.assertEqual(int(Node()), 23)
self.assertEqual(Frag().__int__(), 42)
self.assertEqual(int(Frag()), 42)
    def test_diamond_inheritence(self):
        # Testing multiple inheritance special cases...
        # (Method name misspelling is retained: it is the test's public
        # identifier.)
        class A(object):
            def spam(self): return "A"
        self.assertEqual(A().spam(), "A")
        class B(A):
            def boo(self): return "B"
            def spam(self): return "B"
        self.assertEqual(B().spam(), "B")
        self.assertEqual(B().boo(), "B")
        class C(A):
            def boo(self): return "C"
        self.assertEqual(C().spam(), "A")
        self.assertEqual(C().boo(), "C")
        # D's MRO puts B before C, so both methods resolve to B's.
        class D(B, C): pass
        self.assertEqual(D().spam(), "B")
        self.assertEqual(D().boo(), "B")
        self.assertEqual(D.__mro__, (D, B, C, A, object))
        # E reverses the base order: boo comes from C, spam still from B.
        class E(C, B): pass
        self.assertEqual(E().spam(), "B")
        self.assertEqual(E().boo(), "C")
        self.assertEqual(E.__mro__, (E, C, B, A, object))
        # MRO order disagreement
        # D and E linearize B and C in opposite orders, so no consistent
        # C3 linearization exists for a class inheriting from both.
        try:
            class F(D, E): pass
        except TypeError:
            pass
        else:
            self.fail("expected MRO order disagreement (F)")
        try:
            class G(E, D): pass
        except TypeError:
            pass
        else:
            self.fail("expected MRO order disagreement (G)")
# see thread python-dev/2002-October/029035.html
def test_ex5_from_c3_switch(self):
# Testing ex5 from C3 switch discussion...
class A(object): pass
class B(object): pass
class C(object): pass
class X(A): pass
class Y(A): pass
class Z(X,B,Y,C): pass
self.assertEqual(Z.__mro__, (Z, X, B, Y, A, C, object))
# see "A Monotonic Superclass Linearization for Dylan",
# by Kim Barrett et al. (OOPSLA 1996)
    def test_monotonicity(self):
        # Testing MRO monotonicity...
        # Monotonicity: a class's linearization must preserve the relative
        # order of classes appearing in each of its bases' linearizations.
        # Hierarchy taken from the Dylan linearization paper (see the
        # comment preceding this test).
        class Boat(object): pass
        class DayBoat(Boat): pass
        class WheelBoat(Boat): pass
        class EngineLess(DayBoat): pass
        class SmallMultihull(DayBoat): pass
        class PedalWheelBoat(EngineLess,WheelBoat): pass
        class SmallCatamaran(SmallMultihull): pass
        class Pedalo(PedalWheelBoat,SmallCatamaran): pass
        self.assertEqual(PedalWheelBoat.__mro__,
              (PedalWheelBoat, EngineLess, DayBoat, WheelBoat, Boat, object))
        self.assertEqual(SmallCatamaran.__mro__,
              (SmallCatamaran, SmallMultihull, DayBoat, Boat, object))
        # Pedalo's MRO embeds both parents' MROs without reordering them.
        self.assertEqual(Pedalo.__mro__,
              (Pedalo, PedalWheelBoat, EngineLess, SmallCatamaran,
               SmallMultihull, DayBoat, WheelBoat, Boat, object))
# see "A Monotonic Superclass Linearization for Dylan",
# by Kim Barrett et al. (OOPSLA 1996)
    def test_consistency_with_epg(self):
        # Testing consistency with EPG...
        # EPG = extended precedence graph (from the Dylan linearization
        # paper cited above): the mixins must come after every Pane class.
        class Pane(object): pass
        class ScrollingMixin(object): pass
        class EditingMixin(object): pass
        class ScrollablePane(Pane,ScrollingMixin): pass
        class EditablePane(Pane,EditingMixin): pass
        class EditableScrollablePane(ScrollablePane,EditablePane): pass
        self.assertEqual(EditableScrollablePane.__mro__,
              (EditableScrollablePane, ScrollablePane, EditablePane, Pane,
               ScrollingMixin, EditingMixin, object))
    def test_mro_disagreement(self):
        # Testing error messages for MRO disagreement...
        # Expected message prefix; the exact wording is a CPython
        # implementation detail, hence the check_impl_detail() guard below.
        mro_err_msg = """Cannot create a consistent method resolution
order (MRO) for bases """
        def raises(exc, expected, callable, *args):
            # Assert that callable(*args) raises exc whose str() starts
            # with the expected prefix (message checked on CPython only).
            try:
                callable(*args)
            except exc as msg:
                # the exact msg is generally considered an impl detail
                if support.check_impl_detail():
                    if not str(msg).startswith(expected):
                        self.fail("Message %r, expected %r" %
                                  (str(msg), expected))
            else:
                self.fail("Expected %s" % exc)
        class A(object): pass
        class B(A): pass
        class C(object): pass
        # Test some very simple errors
        raises(TypeError, "duplicate base class A",
               type, "X", (A, A), {})
        raises(TypeError, mro_err_msg,
               type, "X", (A, B), {})
        raises(TypeError, mro_err_msg,
               type, "X", (A, C, B), {})
        # Test a slightly more complex error
        # HVGrid and VHGrid order the two grid classes oppositely, so a
        # class with both as bases has no consistent linearization.
        class GridLayout(object): pass
        class HorizontalGrid(GridLayout): pass
        class VerticalGrid(GridLayout): pass
        class HVGrid(HorizontalGrid, VerticalGrid): pass
        class VHGrid(VerticalGrid, HorizontalGrid): pass
        raises(TypeError, mro_err_msg,
               type, "ConfusedGrid", (HVGrid, VHGrid), {})
def test_object_class(self):
# Testing object class...
a = object()
self.assertEqual(a.__class__, object)
self.assertEqual(type(a), object)
b = object()
self.assertNotEqual(a, b)
self.assertFalse(hasattr(a, "foo"))
try:
a.foo = 12
except (AttributeError, TypeError):
pass
else:
self.fail("object() should not allow setting a foo attribute")
self.assertFalse(hasattr(object(), "__dict__"))
class Cdict(object):
pass
x = Cdict()
self.assertEqual(x.__dict__, {})
x.foo = 1
self.assertEqual(x.foo, 1)
self.assertEqual(x.__dict__, {'foo': 1})
def test_slots(self):
# Testing __slots__...
class C0(object):
__slots__ = []
x = C0()
self.assertFalse(hasattr(x, "__dict__"))
self.assertFalse(hasattr(x, "foo"))
class C1(object):
__slots__ = ['a']
x = C1()
self.assertFalse(hasattr(x, "__dict__"))
self.assertFalse(hasattr(x, "a"))
x.a = 1
self.assertEqual(x.a, 1)
x.a = None
self.assertEqual(x.a, None)
del x.a
self.assertFalse(hasattr(x, "a"))
class C3(object):
__slots__ = ['a', 'b', 'c']
x = C3()
self.assertFalse(hasattr(x, "__dict__"))
self.assertFalse(hasattr(x, 'a'))
self.assertFalse(hasattr(x, 'b'))
self.assertFalse(hasattr(x, 'c'))
x.a = 1
x.b = 2
x.c = 3
self.assertEqual(x.a, 1)
self.assertEqual(x.b, 2)
self.assertEqual(x.c, 3)
class C4(object):
"""Validate name mangling"""
__slots__ = ['__a']
def __init__(self, value):
self.__a = value
def get(self):
return self.__a
x = C4(5)
self.assertFalse(hasattr(x, '__dict__'))
self.assertFalse(hasattr(x, '__a'))
self.assertEqual(x.get(), 5)
try:
x.__a = 6
except AttributeError:
pass
else:
self.fail("Double underscored names not mangled")
# Make sure slot names are proper identifiers
try:
class C(object):
__slots__ = [None]
except TypeError:
pass
else:
self.fail("[None] slots not caught")
try:
class C(object):
__slots__ = ["foo bar"]
except TypeError:
pass
else:
self.fail("['foo bar'] slots not caught")
try:
class C(object):
__slots__ = ["foo\0bar"]
except TypeError:
pass
else:
self.fail("['foo\\0bar'] slots not caught")
try:
class C(object):
__slots__ = ["1"]
except TypeError:
pass
else:
self.fail("['1'] slots not caught")
try:
class C(object):
__slots__ = [""]
except TypeError:
pass
else:
self.fail("[''] slots not caught")
class C(object):
__slots__ = ["a", "a_b", "_a", "A0123456789Z"]
# XXX(nnorwitz): was there supposed to be something tested
# from the class above?
# Test a single string is not expanded as a sequence.
class C(object):
__slots__ = "abc"
c = C()
c.abc = 5
self.assertEqual(c.abc, 5)
# Test unicode slot names
# Test a single unicode string is not expanded as a sequence.
class C(object):
__slots__ = "abc"
c = C()
c.abc = 5
self.assertEqual(c.abc, 5)
# _unicode_to_string used to modify slots in certain circumstances
slots = ("foo", "bar")
class C(object):
__slots__ = slots
x = C()
x.foo = 5
self.assertEqual(x.foo, 5)
self.assertTrue(type(slots[0]) is str)
# this used to leak references
try:
class C(object):
__slots__ = [chr(128)]
except (TypeError, UnicodeEncodeError):
pass
else:
raise TestFailed("[chr(128)] slots not caught")
# Test leaks
class Counted(object):
counter = 0 # counts the number of instances alive
def __init__(self):
Counted.counter += 1
def __del__(self):
Counted.counter -= 1
class C(object):
__slots__ = ['a', 'b', 'c']
x = C()
x.a = Counted()
x.b = Counted()
x.c = Counted()
self.assertEqual(Counted.counter, 3)
del x
support.gc_collect()
self.assertEqual(Counted.counter, 0)
class D(C):
pass
x = D()
x.a = Counted()
x.z = Counted()
self.assertEqual(Counted.counter, 2)
del x
support.gc_collect()
self.assertEqual(Counted.counter, 0)
class E(D):
__slots__ = ['e']
x = E()
x.a = Counted()
x.z = Counted()
x.e = Counted()
self.assertEqual(Counted.counter, 3)
del x
support.gc_collect()
self.assertEqual(Counted.counter, 0)
# Test cyclical leaks [SF bug 519621]
class F(object):
__slots__ = ['a', 'b']
s = F()
s.a = [Counted(), s]
self.assertEqual(Counted.counter, 1)
s = None
support.gc_collect()
self.assertEqual(Counted.counter, 0)
# Test lookup leaks [SF bug 572567]
if hasattr(gc, 'get_objects'):
class G(object):
def __eq__(self, other):
return False
g = G()
orig_objects = len(gc.get_objects())
for i in range(10):
g==g
new_objects = len(gc.get_objects())
self.assertEqual(orig_objects, new_objects)
class H(object):
__slots__ = ['a', 'b']
def __init__(self):
self.a = 1
self.b = 2
def __del__(self_):
self.assertEqual(self_.a, 1)
self.assertEqual(self_.b, 2)
with support.captured_output('stderr') as s:
h = H()
del h
self.assertEqual(s.getvalue(), '')
class X(object):
__slots__ = "a"
with self.assertRaises(AttributeError):
del X().a
def test_slots_special(self):
# Testing __dict__ and __weakref__ in __slots__...
class D(object):
__slots__ = ["__dict__"]
a = D()
self.assertTrue(hasattr(a, "__dict__"))
self.assertFalse(hasattr(a, "__weakref__"))
a.foo = 42
self.assertEqual(a.__dict__, {"foo": 42})
class W(object):
__slots__ = ["__weakref__"]
a = W()
self.assertTrue(hasattr(a, "__weakref__"))
self.assertFalse(hasattr(a, "__dict__"))
try:
a.foo = 42
except AttributeError:
pass
else:
self.fail("shouldn't be allowed to set a.foo")
class C1(W, D):
__slots__ = []
a = C1()
self.assertTrue(hasattr(a, "__dict__"))
self.assertTrue(hasattr(a, "__weakref__"))
a.foo = 42
self.assertEqual(a.__dict__, {"foo": 42})
class C2(D, W):
__slots__ = []
a = C2()
self.assertTrue(hasattr(a, "__dict__"))
self.assertTrue(hasattr(a, "__weakref__"))
a.foo = 42
self.assertEqual(a.__dict__, {"foo": 42})
def test_slots_descriptor(self):
# Issue2115: slot descriptors did not correctly check
# the type of the given object
import abc
class MyABC(metaclass=abc.ABCMeta):
__slots__ = "a"
class Unrelated(object):
pass
MyABC.register(Unrelated)
u = Unrelated()
self.assertIsInstance(u, MyABC)
# This used to crash
self.assertRaises(TypeError, MyABC.a.__set__, u, 3)
    def test_dynamics(self):
        # Testing class attribute propagation...
        class D(object):
            pass
        class E(D):
            pass
        class F(D):
            pass
        D.foo = 1
        self.assertEqual(D.foo, 1)
        # Test that dynamic attributes are inherited
        self.assertEqual(E.foo, 1)
        self.assertEqual(F.foo, 1)
        # Test dynamic instances
        # Attributes and special methods added to the class after
        # instance creation must be visible on existing instances.
        class C(object):
            pass
        a = C()
        self.assertFalse(hasattr(a, "foobar"))
        C.foobar = 2
        self.assertEqual(a.foobar, 2)
        C.method = lambda self: 42
        self.assertEqual(a.method(), 42)
        C.__repr__ = lambda self: "C()"
        self.assertEqual(repr(a), "C()")
        C.__int__ = lambda self: 100
        self.assertEqual(int(a), 100)
        self.assertEqual(a.foobar, 2)
        self.assertFalse(hasattr(a, "spam"))
        def mygetattr(self, name):
            if name == "spam":
                return "spam"
            raise AttributeError
        C.__getattr__ = mygetattr
        self.assertEqual(a.spam, "spam")
        a.new = 12
        self.assertEqual(a.new, 12)
        def mysetattr(self, name, value):
            if name == "spam":
                raise AttributeError
            return object.__setattr__(self, name, value)
        C.__setattr__ = mysetattr
        try:
            a.spam = "not spam"
        except AttributeError:
            pass
        else:
            self.fail("expected AttributeError")
        # The rejected assignment left the __getattr__ fallback intact.
        self.assertEqual(a.spam, "spam")
        class D(C):
            pass
        d = D()
        d.foo = 1
        self.assertEqual(d.foo, 1)
        # Test handling of int*seq and seq*int
        class I(int):
            pass
        self.assertEqual("a"*I(2), "aa")
        self.assertEqual(I(2)*"a", "aa")
        self.assertEqual(2*I(3), 6)
        self.assertEqual(I(3)*2, 6)
        self.assertEqual(I(3)*I(2), 6)
        # Test comparison of classes with dynamic metaclasses
        class dynamicmetaclass(type):
            pass
        class someclass(metaclass=dynamicmetaclass):
            pass
        self.assertNotEqual(someclass, object)
def test_errors(self):
# Testing errors...
try:
class C(list, dict):
pass
except TypeError:
pass
else:
self.fail("inheritance from both list and dict should be illegal")
try:
class C(object, None):
pass
except TypeError:
pass
else:
self.fail("inheritance from non-type should be illegal")
class Classic:
pass
try:
class C(type(len)):
pass
except TypeError:
pass
else:
self.fail("inheritance from CFunction should be illegal")
try:
class C(object):
__slots__ = 1
except TypeError:
pass
else:
self.fail("__slots__ = 1 should be illegal")
try:
class C(object):
__slots__ = [1]
except TypeError:
pass
else:
self.fail("__slots__ = [1] should be illegal")
class M1(type):
pass
class M2(type):
pass
class A1(object, metaclass=M1):
pass
class A2(object, metaclass=M2):
pass
try:
class B(A1, A2):
pass
except TypeError:
pass
else:
self.fail("finding the most derived metaclass should have failed")
def test_classmethods(self):
# Testing class methods...
class C(object):
def foo(*a): return a
goo = classmethod(foo)
c = C()
self.assertEqual(C.goo(1), (C, 1))
self.assertEqual(c.goo(1), (C, 1))
self.assertEqual(c.foo(1), (c, 1))
class D(C):
pass
d = D()
self.assertEqual(D.goo(1), (D, 1))
self.assertEqual(d.goo(1), (D, 1))
self.assertEqual(d.foo(1), (d, 1))
self.assertEqual(D.foo(d, 1), (d, 1))
# Test for a specific crash (SF bug 528132)
def f(cls, arg): return (cls, arg)
ff = classmethod(f)
self.assertEqual(ff.__get__(0, int)(42), (int, 42))
self.assertEqual(ff.__get__(0)(42), (int, 42))
# Test super() with classmethods (SF bug 535444)
self.assertEqual(C.goo.__self__, C)
self.assertEqual(D.goo.__self__, D)
self.assertEqual(super(D,D).goo.__self__, D)
self.assertEqual(super(D,d).goo.__self__, D)
self.assertEqual(super(D,D).goo(), (D,))
self.assertEqual(super(D,d).goo(), (D,))
# Verify that a non-callable will raise
meth = classmethod(1).__get__(1)
self.assertRaises(TypeError, meth)
# Verify that classmethod() doesn't allow keyword args
try:
classmethod(f, kw=1)
except TypeError:
pass
else:
self.fail("classmethod shouldn't accept keyword args")
cm = classmethod(f)
self.assertEqual(cm.__dict__, {})
cm.x = 42
self.assertEqual(cm.x, 42)
self.assertEqual(cm.__dict__, {"x" : 42})
del cm.x
self.assertFalse(hasattr(cm, "x"))
    @support.impl_detail("the module 'xxsubtype' is internal")
    def test_classmethods_in_c(self):
        # Testing C-based class methods...
        # spamlist.classmeth echoes back (class, args, kwargs).
        import xxsubtype as spam
        a = (1, 2, 3)
        d = {'abc': 123}
        x, a1, d1 = spam.spamlist.classmeth(*a, **d)
        self.assertEqual(x, spam.spamlist)
        self.assertEqual(a, a1)
        self.assertEqual(d, d1)
        x, a1, d1 = spam.spamlist().classmeth(*a, **d)
        self.assertEqual(x, spam.spamlist)
        self.assertEqual(a, a1)
        self.assertEqual(d, d1)
        # Calling the raw classmethod object from the class __dict__
        # requires an explicit class as the first argument.
        spam_cm = spam.spamlist.__dict__['classmeth']
        x2, a2, d2 = spam_cm(spam.spamlist, *a, **d)
        self.assertEqual(x2, spam.spamlist)
        self.assertEqual(a2, a1)
        self.assertEqual(d2, d1)
        class SubSpam(spam.spamlist): pass
        x2, a2, d2 = spam_cm(SubSpam, *a, **d)
        self.assertEqual(x2, SubSpam)
        self.assertEqual(a2, a1)
        self.assertEqual(d2, d1)
        # The explicit class argument is mandatory and must be a
        # spamlist subclass: no args, an instance, and an unrelated
        # type are all rejected.
        with self.assertRaises(TypeError):
            spam_cm()
        with self.assertRaises(TypeError):
            spam_cm(spam.spamlist())
        with self.assertRaises(TypeError):
            spam_cm(list)
def test_staticmethods(self):
# Testing static methods...
class C(object):
def foo(*a): return a
goo = staticmethod(foo)
c = C()
self.assertEqual(C.goo(1), (1,))
self.assertEqual(c.goo(1), (1,))
self.assertEqual(c.foo(1), (c, 1,))
class D(C):
pass
d = D()
self.assertEqual(D.goo(1), (1,))
self.assertEqual(d.goo(1), (1,))
self.assertEqual(d.foo(1), (d, 1))
self.assertEqual(D.foo(d, 1), (d, 1))
sm = staticmethod(None)
self.assertEqual(sm.__dict__, {})
sm.x = 42
self.assertEqual(sm.x, 42)
self.assertEqual(sm.__dict__, {"x" : 42})
del sm.x
self.assertFalse(hasattr(sm, "x"))
@support.impl_detail("the module 'xxsubtype' is internal")
def test_staticmethods_in_c(self):
# Testing C-based static methods...
import xxsubtype as spam
a = (1, 2, 3)
d = {"abc": 123}
x, a1, d1 = spam.spamlist.staticmeth(*a, **d)
self.assertEqual(x, None)
self.assertEqual(a, a1)
self.assertEqual(d, d1)
x, a1, d2 = spam.spamlist().staticmeth(*a, **d)
self.assertEqual(x, None)
self.assertEqual(a, a1)
self.assertEqual(d, d1)
def test_classic(self):
# Testing classic classes...
class C:
def foo(*a): return a
goo = classmethod(foo)
c = C()
self.assertEqual(C.goo(1), (C, 1))
self.assertEqual(c.goo(1), (C, 1))
self.assertEqual(c.foo(1), (c, 1))
class D(C):
pass
d = D()
self.assertEqual(D.goo(1), (D, 1))
self.assertEqual(d.goo(1), (D, 1))
self.assertEqual(d.foo(1), (d, 1))
self.assertEqual(D.foo(d, 1), (d, 1))
class E: # *not* subclassing from C
foo = C.foo
self.assertEqual(E().foo.__func__, C.foo) # i.e., unbound
self.assertTrue(repr(C.foo.__get__(C())).startswith("<bound method "))
    def test_compattr(self):
        # Testing computed attributes...
        # Hand-rolled descriptor equivalent to property().  Also relies
        # on class-private name mangling: every __name identifier in C's
        # body (including the positional arguments at the bottom) is
        # compiled as _C__name, so the references line up with the defs.
        class C(object):
            class computed_attribute(object):
                def __init__(self, get, set=None, delete=None):
                    self.__get = get
                    self.__set = set
                    self.__delete = delete
                def __get__(self, obj, type=None):
                    return self.__get(obj)
                def __set__(self, obj, value):
                    return self.__set(obj, value)
                def __delete__(self, obj):
                    return self.__delete(obj)
            def __init__(self):
                self.__x = 0
            def __get_x(self):
                # Getter post-increments so each read is observable.
                x = self.__x
                self.__x = x+1
                return x
            def __set_x(self, x):
                self.__x = x
            def __delete_x(self):
                del self.__x
            x = computed_attribute(__get_x, __set_x, __delete_x)
        a = C()
        self.assertEqual(a.x, 0)
        self.assertEqual(a.x, 1)
        a.x = 10
        self.assertEqual(a.x, 10)
        self.assertEqual(a.x, 11)
        del a.x
        self.assertEqual(hasattr(a, 'x'), 0)
def test_newslots(self):
# Testing __new__ slot override...
class C(list):
def __new__(cls):
self = list.__new__(cls)
self.foo = 1
return self
def __init__(self):
self.foo = self.foo + 2
a = C()
self.assertEqual(a.foo, 3)
self.assertEqual(a.__class__, C)
class D(C):
pass
b = D()
self.assertEqual(b.foo, 3)
self.assertEqual(b.__class__, D)
    def test_altmro(self):
        # Testing mro() and overriding it...
        class A(object):
            def f(self): return "A"
        class B(A):
            pass
        class C(A):
            def f(self): return "C"
        class D(B, C):
            pass
        self.assertEqual(D.mro(), [D, B, C, A, object])
        self.assertEqual(D.__mro__, (D, B, C, A, object))
        self.assertEqual(D().f(), "C")
        # A metaclass may override mro(); reversing it makes object the
        # most specific entry, so attribute lookup finds A.f first.
        class PerverseMetaType(type):
            def mro(cls):
                L = type.mro(cls)
                L.reverse()
                return L
        class X(D,B,C,A, metaclass=PerverseMetaType):
            pass
        self.assertEqual(X.__mro__, (object, A, C, B, D, X))
        self.assertEqual(X().f(), "A")
        # mro() results that include unrelated or invalid entries must be
        # rejected with TypeError rather than corrupting the type.
        try:
            class _metaclass(type):
                def mro(self):
                    return [self, dict, object]
            class X(object, metaclass=_metaclass):
                pass
            # In CPython, the class creation above already raises
            # TypeError, as a protection against the fact that
            # instances of X would segfault it. In other Python
            # implementations it would be ok to let the class X
            # be created, but instead get a clean TypeError on the
            # __setitem__ below.
            x = object.__new__(X)
            x[5] = 6
        except TypeError:
            pass
        else:
            self.fail("devious mro() return not caught")
        try:
            class _metaclass(type):
                def mro(self):
                    return [1]
            class X(object, metaclass=_metaclass):
                pass
        except TypeError:
            pass
        else:
            self.fail("non-class mro() return not caught")
        try:
            class _metaclass(type):
                def mro(self):
                    return 1
            class X(object, metaclass=_metaclass):
                pass
        except TypeError:
            pass
        else:
            self.fail("non-sequence mro() return not caught")
def test_overloading(self):
# Testing operator overloading...
class B(object):
"Intermediate class because object doesn't have a __setattr__"
class C(B):
def __getattr__(self, name):
if name == "foo":
return ("getattr", name)
else:
raise AttributeError
def __setattr__(self, name, value):
if name == "foo":
self.setattr = (name, value)
else:
return B.__setattr__(self, name, value)
def __delattr__(self, name):
if name == "foo":
self.delattr = name
else:
return B.__delattr__(self, name)
def __getitem__(self, key):
return ("getitem", key)
def __setitem__(self, key, value):
self.setitem = (key, value)
def __delitem__(self, key):
self.delitem = key
a = C()
self.assertEqual(a.foo, ("getattr", "foo"))
a.foo = 12
self.assertEqual(a.setattr, ("foo", 12))
del a.foo
self.assertEqual(a.delattr, "foo")
self.assertEqual(a[12], ("getitem", 12))
a[12] = 21
self.assertEqual(a.setitem, (12, 21))
del a[12]
self.assertEqual(a.delitem, 12)
self.assertEqual(a[0:10], ("getitem", slice(0, 10)))
a[0:10] = "foo"
self.assertEqual(a.setitem, (slice(0, 10), "foo"))
del a[0:10]
self.assertEqual(a.delitem, (slice(0, 10)))
def test_methods(self):
# Testing methods...
class C(object):
def __init__(self, x):
self.x = x
def foo(self):
return self.x
c1 = C(1)
self.assertEqual(c1.foo(), 1)
class D(C):
boo = C.foo
goo = c1.foo
d2 = D(2)
self.assertEqual(d2.foo(), 2)
self.assertEqual(d2.boo(), 2)
self.assertEqual(d2.goo(), 1)
class E(object):
foo = C.foo
self.assertEqual(E().foo.__func__, C.foo) # i.e., unbound
self.assertTrue(repr(C.foo.__get__(C(1))).startswith("<bound method "))
    def test_special_method_lookup(self):
        # The lookup of special methods bypasses __getattr__ and
        # __getattribute__, but they still can be descriptors.
        #
        # For each special method in `specials` we check three things:
        # 1. implicit invocation works without triggering __getattr__ /
        #    __getattribute__ on the instance (lookup is on the type),
        # 2. a descriptor stored under the special name has its __get__
        #    called exactly once per implicit invocation,
        # 3. an exception raised by the descriptor's __get__ propagates.
        def run_context(manager):
            with manager:
                pass
        def iden(self):
            return self
        def hello(self):
            return b"hello"
        def empty_seq(self):
            return []
        def zero(self):
            return 0
        def complex_num(self):
            return 1j
        def stop(self):
            raise StopIteration
        def return_true(self, thing=None):
            return True
        def do_isinstance(obj):
            return isinstance(int, obj)
        def do_issubclass(obj):
            return issubclass(int, obj)
        def do_dict_missing(checker):
            class DictSub(checker.__class__, dict):
                pass
            self.assertEqual(DictSub()["hi"], 4)
        def some_number(self_, key):
            # `self_` is the instance under test; `self` is the TestCase.
            self.assertEqual(key, "hi")
            return 4
        def swallow(*args): pass
        def format_impl(self, spec):
            return "hello"
        # It would be nice to have every special method tested here, but I'm
        # only listing the ones I can remember outside of typeobject.c, since it
        # does it right.
        # Each entry: (name, runner that triggers the implicit call,
        #              implementation, attribute names __getattribute__ may
        #              legitimately see, extra attributes the protocol needs).
        specials = [
            ("__bytes__", bytes, hello, set(), {}),
            ("__reversed__", reversed, empty_seq, set(), {}),
            ("__length_hint__", list, zero, set(),
             {"__iter__" : iden, "__next__" : stop}),
            ("__sizeof__", sys.getsizeof, zero, set(), {}),
            ("__instancecheck__", do_isinstance, return_true, set(), {}),
            ("__missing__", do_dict_missing, some_number,
             set(("__class__",)), {}),
            ("__subclasscheck__", do_issubclass, return_true,
             set(("__bases__",)), {}),
            ("__enter__", run_context, iden, set(), {"__exit__" : swallow}),
            ("__exit__", run_context, swallow, set(), {"__enter__" : iden}),
            ("__complex__", complex, complex_num, set(), {}),
            ("__format__", format, format_impl, set(), {}),
            ("__floor__", math.floor, zero, set(), {}),
            ("__trunc__", math.trunc, zero, set(), {}),
            ("__trunc__", int, zero, set(), {}),
            ("__ceil__", math.ceil, zero, set(), {}),
            ("__dir__", dir, empty_seq, set(), {}),
            ]
        class Checker(object):
            # Fails the test if instance-level attribute machinery is used
            # for a special-method lookup (except for names in `ok`).
            def __getattr__(self, attr, test=self):
                test.fail("__getattr__ called with {0}".format(attr))
            def __getattribute__(self, attr, test=self):
                if attr not in ok:
                    test.fail("__getattribute__ called with {0}".format(attr))
                return object.__getattribute__(self, attr)
        class SpecialDescr(object):
            # Descriptor that records each __get__ invocation.
            def __init__(self, impl):
                self.impl = impl
            def __get__(self, obj, owner):
                record.append(1)
                return self.impl.__get__(obj, owner)
        class MyException(Exception):
            pass
        class ErrDescr(object):
            def __get__(self, obj, owner):
                raise MyException
        for name, runner, meth_impl, ok, env in specials:
            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, meth_impl)
            runner(X())

            record = []
            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, SpecialDescr(meth_impl))
            runner(X())
            self.assertEqual(record, [1], name)

            class X(Checker):
                pass
            for attr, obj in env.items():
                setattr(X, attr, obj)
            setattr(X, name, ErrDescr())
            self.assertRaises(MyException, runner, X())
    def test_specials(self):
        # Testing special operators...
        # Test operators like __hash__ for which a built-in default exists
        # Test the default behavior for static classes
        class C(object):
            def __getitem__(self, i):
                if 0 <= i < 10: return i
                raise IndexError
        c1 = C()
        c2 = C()
        self.assertTrue(not not c1) # default truth value of an object is true
        self.assertNotEqual(id(c1), id(c2))
        hash(c1)
        hash(c2)
        self.assertEqual(c1, c1)
        # Default equality is identity; distinct instances compare unequal.
        self.assertTrue(c1 != c2)
        self.assertTrue(not c1 != c1)
        self.assertTrue(not c1 == c2)
        # Note that the module name appears in str/repr, and that varies
        # depending on whether this test is run standalone or from a framework.
        self.assertTrue(str(c1).find('C object at ') >= 0)
        self.assertEqual(str(c1), repr(c1))
        # `in` falls back to __getitem__ iteration when __contains__ and
        # __iter__ are absent.
        self.assertNotIn(-1, c1)
        for i in range(10):
            self.assertIn(i, c1)
        self.assertNotIn(10, c1)
        # Test the default behavior for dynamic classes
        class D(object):
            def __getitem__(self, i):
                if 0 <= i < 10: return i
                raise IndexError
        d1 = D()
        d2 = D()
        self.assertTrue(not not d1)
        self.assertNotEqual(id(d1), id(d2))
        hash(d1)
        hash(d2)
        self.assertEqual(d1, d1)
        self.assertNotEqual(d1, d2)
        self.assertTrue(not d1 != d1)
        self.assertTrue(not d1 == d2)
        # Note that the module name appears in str/repr, and that varies
        # depending on whether this test is run standalone or from a framework.
        self.assertTrue(str(d1).find('D object at ') >= 0)
        self.assertEqual(str(d1), repr(d1))
        self.assertNotIn(-1, d1)
        for i in range(10):
            self.assertIn(i, d1)
        self.assertNotIn(10, d1)
        # Test overridden behavior
        # Proxy forwards every special operation to the wrapped object.
        class Proxy(object):
            def __init__(self, x):
                self.x = x
            def __bool__(self):
                return not not self.x
            def __hash__(self):
                return hash(self.x)
            def __eq__(self, other):
                return self.x == other
            def __ne__(self, other):
                return self.x != other
            def __ge__(self, other):
                return self.x >= other
            def __gt__(self, other):
                return self.x > other
            def __le__(self, other):
                return self.x <= other
            def __lt__(self, other):
                return self.x < other
            def __str__(self):
                return "Proxy:%s" % self.x
            def __repr__(self):
                return "Proxy(%r)" % self.x
            def __contains__(self, value):
                return value in self.x
        p0 = Proxy(0)
        p1 = Proxy(1)
        p_1 = Proxy(-1)
        self.assertFalse(p0)
        self.assertTrue(not not p1)
        self.assertEqual(hash(p0), hash(0))
        self.assertEqual(p0, p0)
        self.assertNotEqual(p0, p1)
        self.assertTrue(not p0 != p0)
        self.assertEqual(not p0, p1)  # True == Proxy(1) via reflected __eq__
        self.assertTrue(p0 < p1)
        self.assertTrue(p0 <= p1)
        self.assertTrue(p1 > p0)
        self.assertTrue(p1 >= p0)
        self.assertEqual(str(p0), "Proxy:0")
        self.assertEqual(repr(p0), "Proxy(0)")
        p10 = Proxy(range(10))
        self.assertNotIn(-1, p10)
        for i in range(10):
            self.assertIn(i, p10)
        self.assertNotIn(10, p10)
    def test_weakrefs(self):
        # Testing weak references...
        # A plain new-style class is weak-referenceable by default; a class
        # with __slots__ is only weak-referenceable if '__weakref__' is one
        # of the slots.
        import weakref
        class C(object):
            pass
        c = C()
        r = weakref.ref(c)
        self.assertEqual(r(), c)
        del c
        support.gc_collect()  # needed on implementations without refcounting
        self.assertEqual(r(), None)
        del r
        class NoWeak(object):
            __slots__ = ['foo']
        no = NoWeak()
        try:
            weakref.ref(no)
        except TypeError as msg:
            self.assertTrue(str(msg).find("weak reference") >= 0)
        else:
            self.fail("weakref.ref(no) should be illegal")
        class Weak(object):
            __slots__ = ['foo', '__weakref__']
        yes = Weak()
        r = weakref.ref(yes)
        self.assertEqual(r(), yes)
        del yes
        support.gc_collect()
        self.assertEqual(r(), None)
        del r
    def test_properties(self):
        # Testing property...
        # Covers attribute-style access, explicit descriptor-protocol calls
        # (__get__/__set__/__delete__), introspection of the raw property
        # object, read-only property attributes, and error propagation from
        # a broken property used as __getitem__.
        class C(object):
            def getx(self):
                return self.__x
            def setx(self, value):
                self.__x = value
            def delx(self):
                del self.__x
            x = property(getx, setx, delx, doc="I'm the x property.")
        a = C()
        self.assertFalse(hasattr(a, "x"))
        a.x = 42
        self.assertEqual(a._C__x, 42)  # name-mangled storage attribute
        self.assertEqual(a.x, 42)
        del a.x
        self.assertFalse(hasattr(a, "x"))
        self.assertFalse(hasattr(a, "_C__x"))
        # The descriptor protocol can also be driven explicitly.
        C.x.__set__(a, 100)
        self.assertEqual(C.x.__get__(a), 100)
        C.x.__delete__(a)
        self.assertFalse(hasattr(a, "x"))

        raw = C.__dict__['x']
        self.assertIsInstance(raw, property)

        attrs = dir(raw)
        self.assertIn("__doc__", attrs)
        self.assertIn("fget", attrs)
        self.assertIn("fset", attrs)
        self.assertIn("fdel", attrs)

        self.assertEqual(raw.__doc__, "I'm the x property.")
        self.assertTrue(raw.fget is C.__dict__['getx'])
        self.assertTrue(raw.fset is C.__dict__['setx'])
        self.assertTrue(raw.fdel is C.__dict__['delx'])

        # fget/fset/fdel/__doc__ are read-only on the property object.
        for attr in "__doc__", "fget", "fset", "fdel":
            try:
                setattr(raw, attr, 42)
            except AttributeError as msg:
                if str(msg).find('readonly') < 0:
                    self.fail("when setting readonly attr %r on a property, "
                              "got unexpected AttributeError msg %r" % (attr, str(msg)))
            else:
                self.fail("expected AttributeError from trying to set readonly %r "
                          "attr on a property" % attr)

        # An exception raised inside the getter must propagate when the
        # property is used implicitly (here as __getitem__ for iteration).
        class D(object):
            __getitem__ = property(lambda s: 1/0)

        d = D()
        try:
            for i in d:
                str(i)
        except ZeroDivisionError:
            pass
        else:
            self.fail("expected ZeroDivisionError from bad property")
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_properties_doc_attrib(self):
        # A property with no explicit doc= inherits __doc__ from its getter;
        # a property without a getter has no docstring.
        class E(object):
            def getter(self):
                "getter method"
                return 0
            def setter(self_, value):
                "setter method"
                pass
            prop = property(getter)
            self.assertEqual(prop.__doc__, "getter method")
            prop2 = property(fset=setter)
            self.assertEqual(prop2.__doc__, None)
    def test_testcapi_no_segfault(self):
        # this segfaulted in 2.5b2
        # Regression test: constructing a property from a C function with a
        # docstring must not crash.  _testcapi is optional (CPython-only).
        try:
            import _testcapi
        except ImportError:
            pass
        else:
            class X(object):
                p = property(_testcapi.test_with_docstring)
    def test_properties_plus(self):
        # Tests the property.getter/.setter/.deleter decorator forms:
        # building a property incrementally, overriding one accessor in a
        # subclass via BaseClass.prop.accessor, and replacing an accessor
        # by decorating again.
        class C(object):
            foo = property(doc="hello")
            @foo.getter
            def foo(self):
                return self._foo
            @foo.setter
            def foo(self, value):
                self._foo = abs(value)
            @foo.deleter
            def foo(self):
                del self._foo
        c = C()
        self.assertEqual(C.foo.__doc__, "hello")  # doc= survives accessor updates
        self.assertFalse(hasattr(c, "foo"))
        c.foo = -42
        self.assertTrue(hasattr(c, '_foo'))
        self.assertEqual(c._foo, 42)
        self.assertEqual(c.foo, 42)
        del c.foo
        self.assertFalse(hasattr(c, '_foo'))
        self.assertFalse(hasattr(c, "foo"))

        # Override only the deleter in a subclass.
        class D(C):
            @C.foo.deleter
            def foo(self):
                try:
                    del self._foo
                except AttributeError:
                    pass
        d = D()
        d.foo = 24
        self.assertEqual(d.foo, 24)
        del d.foo
        del d.foo  # second delete is tolerated by D's deleter

        # A second @foo.setter replaces the first.
        class E(object):
            @property
            def foo(self):
                return self._foo
            @foo.setter
            def foo(self, value):
                raise RuntimeError
            @foo.setter
            def foo(self, value):
                self._foo = abs(value)
            @foo.deleter
            def foo(self, value=None):
                del self._foo

        e = E()
        e.foo = -42
        self.assertEqual(e.foo, 42)
        del e.foo

        class F(E):
            @E.foo.deleter
            def foo(self):
                del self._foo
            @foo.setter
            def foo(self, value):
                self._foo = max(0, value)
        f = F()
        f.foo = -10
        self.assertEqual(f.foo, 0)
        del f.foo
def test_dict_constructors(self):
# Testing dict constructor ...
d = dict()
self.assertEqual(d, {})
d = dict({})
self.assertEqual(d, {})
d = dict({1: 2, 'a': 'b'})
self.assertEqual(d, {1: 2, 'a': 'b'})
self.assertEqual(d, dict(list(d.items())))
self.assertEqual(d, dict(iter(d.items())))
d = dict({'one':1, 'two':2})
self.assertEqual(d, dict(one=1, two=2))
self.assertEqual(d, dict(**d))
self.assertEqual(d, dict({"one": 1}, two=2))
self.assertEqual(d, dict([("two", 2)], one=1))
self.assertEqual(d, dict([("one", 100), ("two", 200)], **d))
self.assertEqual(d, dict(**d))
for badarg in 0, 0, 0j, "0", [0], (0,):
try:
dict(badarg)
except TypeError:
pass
except ValueError:
if badarg == "0":
# It's a sequence, and its elements are also sequences (gotta
# love strings <wink>), but they aren't of length 2, so this
# one seemed better as a ValueError than a TypeError.
pass
else:
self.fail("no TypeError from dict(%r)" % badarg)
else:
self.fail("no TypeError from dict(%r)" % badarg)
try:
dict({}, {})
except TypeError:
pass
else:
self.fail("no TypeError from dict({}, {})")
class Mapping:
# Lacks a .keys() method; will be added later.
dict = {1:2, 3:4, 'a':1j}
try:
dict(Mapping())
except TypeError:
pass
else:
self.fail("no TypeError from dict(incomplete mapping)")
Mapping.keys = lambda self: list(self.dict.keys())
Mapping.__getitem__ = lambda self, i: self.dict[i]
d = dict(Mapping())
self.assertEqual(d, Mapping.dict)
# Init from sequence of iterable objects, each producing a 2-sequence.
class AddressBookEntry:
def __init__(self, first, last):
self.first = first
self.last = last
def __iter__(self):
return iter([self.first, self.last])
d = dict([AddressBookEntry('Tim', 'Warsaw'),
AddressBookEntry('Barry', 'Peters'),
AddressBookEntry('Tim', 'Peters'),
AddressBookEntry('Barry', 'Warsaw')])
self.assertEqual(d, {'Barry': 'Warsaw', 'Tim': 'Peters'})
d = dict(zip(range(4), range(1, 5)))
self.assertEqual(d, dict([(i, i+1) for i in range(4)]))
# Bad sequence lengths.
for bad in [('tooshort',)], [('too', 'long', 'by 1')]:
try:
dict(bad)
except ValueError:
pass
else:
self.fail("no ValueError from dict(%r)" % bad)
def test_dir(self):
# Testing dir() ...
junk = 12
self.assertEqual(dir(), ['junk', 'self'])
del junk
# Just make sure these don't blow up!
for arg in 2, 2, 2j, 2e0, [2], "2", b"2", (2,), {2:2}, type, self.test_dir:
dir(arg)
# Test dir on new-style classes. Since these have object as a
# base class, a lot more gets sucked in.
def interesting(strings):
return [s for s in strings if not s.startswith('_')]
class C(object):
Cdata = 1
def Cmethod(self): pass
cstuff = ['Cdata', 'Cmethod']
self.assertEqual(interesting(dir(C)), cstuff)
c = C()
self.assertEqual(interesting(dir(c)), cstuff)
## self.assertIn('__self__', dir(C.Cmethod))
c.cdata = 2
c.cmethod = lambda self: 0
self.assertEqual(interesting(dir(c)), cstuff + ['cdata', 'cmethod'])
## self.assertIn('__self__', dir(c.Cmethod))
class A(C):
Adata = 1
def Amethod(self): pass
astuff = ['Adata', 'Amethod'] + cstuff
self.assertEqual(interesting(dir(A)), astuff)
## self.assertIn('__self__', dir(A.Amethod))
a = A()
self.assertEqual(interesting(dir(a)), astuff)
a.adata = 42
a.amethod = lambda self: 3
self.assertEqual(interesting(dir(a)), astuff + ['adata', 'amethod'])
## self.assertIn('__self__', dir(a.Amethod))
# Try a module subclass.
class M(type(sys)):
pass
minstance = M("m")
minstance.b = 2
minstance.a = 1
names = [x for x in dir(minstance) if x not in ["__name__", "__doc__"]]
self.assertEqual(names, ['a', 'b'])
class M2(M):
def getdict(self):
return "Not a dict!"
__dict__ = property(getdict)
m2instance = M2("m2")
m2instance.b = 2
m2instance.a = 1
self.assertEqual(m2instance.__dict__, "Not a dict!")
try:
dir(m2instance)
except TypeError:
pass
# Two essentially featureless objects, just inheriting stuff from
# object.
self.assertEqual(dir(NotImplemented), dir(Ellipsis))
# Nasty test case for proxied objects
class Wrapper(object):
def __init__(self, obj):
self.__obj = obj
def __repr__(self):
return "Wrapper(%s)" % repr(self.__obj)
def __getitem__(self, key):
return Wrapper(self.__obj[key])
def __len__(self):
return len(self.__obj)
def __getattr__(self, name):
return Wrapper(getattr(self.__obj, name))
class C(object):
def __getclass(self):
return Wrapper(type(self))
__class__ = property(__getclass)
dir(C()) # This used to segfault
def test_supers(self):
# Testing super...
class A(object):
def meth(self, a):
return "A(%r)" % a
self.assertEqual(A().meth(1), "A(1)")
class B(A):
def __init__(self):
self.__super = super(B, self)
def meth(self, a):
return "B(%r)" % a + self.__super.meth(a)
self.assertEqual(B().meth(2), "B(2)A(2)")
class C(A):
def meth(self, a):
return "C(%r)" % a + self.__super.meth(a)
C._C__super = super(C)
self.assertEqual(C().meth(3), "C(3)A(3)")
class D(C, B):
def meth(self, a):
return "D(%r)" % a + super(D, self).meth(a)
self.assertEqual(D().meth(4), "D(4)C(4)B(4)A(4)")
# Test for subclassing super
class mysuper(super):
def __init__(self, *args):
return super(mysuper, self).__init__(*args)
class E(D):
def meth(self, a):
return "E(%r)" % a + mysuper(E, self).meth(a)
self.assertEqual(E().meth(5), "E(5)D(5)C(5)B(5)A(5)")
class F(E):
def meth(self, a):
s = self.__super # == mysuper(F, self)
return "F(%r)[%s]" % (a, s.__class__.__name__) + s.meth(a)
F._F__super = mysuper(F)
self.assertEqual(F().meth(6), "F(6)[mysuper]E(6)D(6)C(6)B(6)A(6)")
# Make sure certain errors are raised
try:
super(D, 42)
except TypeError:
pass
else:
self.fail("shouldn't allow super(D, 42)")
try:
super(D, C())
except TypeError:
pass
else:
self.fail("shouldn't allow super(D, C())")
try:
super(D).__get__(12)
except TypeError:
pass
else:
self.fail("shouldn't allow super(D).__get__(12)")
try:
super(D).__get__(C())
except TypeError:
pass
else:
self.fail("shouldn't allow super(D).__get__(C())")
# Make sure data descriptors can be overridden and accessed via super
# (new feature in Python 2.3)
class DDbase(object):
def getx(self): return 42
x = property(getx)
class DDsub(DDbase):
def getx(self): return "hello"
x = property(getx)
dd = DDsub()
self.assertEqual(dd.x, "hello")
self.assertEqual(super(DDsub, dd).x, 42)
# Ensure that super() lookup of descriptor from classmethod
# works (SF ID# 743627)
class Base(object):
aProp = property(lambda self: "foo")
class Sub(Base):
@classmethod
def test(klass):
return super(Sub,klass).aProp
self.assertEqual(Sub.test(), Base.aProp)
# Verify that super() doesn't allow keyword args
try:
super(Base, kw=1)
except TypeError:
pass
else:
self.assertEqual("super shouldn't accept keyword args")
    def test_basic_inheritance(self):
        # Testing inheritance from basic types...
        # For each builtin (int, float, complex, tuple, str, list) we check
        # that a subclass instance behaves like the base value (equality,
        # hash, use as dict key) and that operations which return a *new*
        # object produce the plain base type, not the subclass.
        class hexint(int):
            def __repr__(self):
                return hex(self)
            def __add__(self, other):
                return hexint(int.__add__(self, other))
            # (Note that overriding __radd__ doesn't work,
            # because the int type gets first dibs.)
        self.assertEqual(repr(hexint(7) + 9), "0x10")
        self.assertEqual(repr(hexint(1000) + 7), "0x3ef")
        a = hexint(12345)
        self.assertEqual(a, 12345)
        self.assertEqual(int(a), 12345)
        self.assertTrue(int(a).__class__ is int)
        self.assertEqual(hash(a), hash(12345))
        self.assertTrue((+a).__class__ is int)
        self.assertTrue((a >> 0).__class__ is int)
        self.assertTrue((a << 0).__class__ is int)
        self.assertTrue((hexint(0) << 12).__class__ is int)
        self.assertTrue((hexint(0) >> 12).__class__ is int)

        class octlong(int):
            __slots__ = []
            def __str__(self):
                return oct(self)
            def __add__(self, other):
                return self.__class__(super(octlong, self).__add__(other))
            __radd__ = __add__
        self.assertEqual(str(octlong(3) + 5), "0o10")
        # (Note that overriding __radd__ here only seems to work
        # because the example uses a short int left argument.)
        self.assertEqual(str(5 + octlong(3000)), "0o5675")
        a = octlong(12345)
        self.assertEqual(a, 12345)
        self.assertEqual(int(a), 12345)
        self.assertEqual(hash(a), hash(12345))
        self.assertTrue(int(a).__class__ is int)
        self.assertTrue((+a).__class__ is int)
        self.assertTrue((-a).__class__ is int)
        self.assertTrue((-octlong(0)).__class__ is int)
        self.assertTrue((a >> 0).__class__ is int)
        self.assertTrue((a << 0).__class__ is int)
        self.assertTrue((a - 0).__class__ is int)
        self.assertTrue((a * 1).__class__ is int)
        self.assertTrue((a ** 1).__class__ is int)
        self.assertTrue((a // 1).__class__ is int)
        self.assertTrue((1 * a).__class__ is int)
        self.assertTrue((a | 0).__class__ is int)
        self.assertTrue((a ^ 0).__class__ is int)
        self.assertTrue((a & -1).__class__ is int)
        self.assertTrue((octlong(0) << 12).__class__ is int)
        self.assertTrue((octlong(0) >> 12).__class__ is int)
        self.assertTrue(abs(octlong(0)).__class__ is int)

        # Because octlong overrides __add__, we can't check the absence of +0
        # optimizations using octlong.
        class longclone(int):
            pass
        a = longclone(1)
        self.assertTrue((a + 0).__class__ is int)
        self.assertTrue((0 + a).__class__ is int)

        # Check that negative clones don't segfault
        a = longclone(-1)
        self.assertEqual(a.__dict__, {})
        self.assertEqual(int(a), -1) # self.assertTrue PyNumber_Long() copies the sign bit

        class precfloat(float):
            __slots__ = ['prec']
            def __init__(self, value=0.0, prec=12):
                self.prec = int(prec)
            def __repr__(self):
                return "%.*g" % (self.prec, self)
        self.assertEqual(repr(precfloat(1.1)), "1.1")
        a = precfloat(12345)
        self.assertEqual(a, 12345.0)
        self.assertEqual(float(a), 12345.0)
        self.assertTrue(float(a).__class__ is float)
        self.assertEqual(hash(a), hash(12345.0))
        self.assertTrue((+a).__class__ is float)

        class madcomplex(complex):
            def __repr__(self):
                return "%.17gj%+.17g" % (self.imag, self.real)
        a = madcomplex(-3, 4)
        self.assertEqual(repr(a), "4j-3")
        base = complex(-3, 4)
        self.assertEqual(base.__class__, complex)
        self.assertEqual(a, base)
        self.assertEqual(complex(a), base)
        self.assertEqual(complex(a).__class__, complex)
        a = madcomplex(a)  # just trying another form of the constructor
        self.assertEqual(repr(a), "4j-3")
        self.assertEqual(a, base)
        self.assertEqual(complex(a), base)
        self.assertEqual(complex(a).__class__, complex)
        self.assertEqual(hash(a), hash(base))
        self.assertEqual((+a).__class__, complex)
        self.assertEqual((a + 0).__class__, complex)
        self.assertEqual(a + 0, base)
        self.assertEqual((a - 0).__class__, complex)
        self.assertEqual(a - 0, base)
        self.assertEqual((a * 1).__class__, complex)
        self.assertEqual(a * 1, base)
        self.assertEqual((a / 1).__class__, complex)
        self.assertEqual(a / 1, base)

        class madtuple(tuple):
            _rev = None
            def rev(self):
                # Cached reversal; preserves the subclass type.
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__(L)
                return self._rev
        a = madtuple((1,2,3,4,5,6,7,8,9,0))
        self.assertEqual(a, (1,2,3,4,5,6,7,8,9,0))
        self.assertEqual(a.rev(), madtuple((0,9,8,7,6,5,4,3,2,1)))
        self.assertEqual(a.rev().rev(), madtuple((1,2,3,4,5,6,7,8,9,0)))
        for i in range(512):
            t = madtuple(range(i))
            u = t.rev()
            v = u.rev()
            self.assertEqual(v, t)
        a = madtuple((1,2,3,4,5))
        self.assertEqual(tuple(a), (1,2,3,4,5))
        self.assertTrue(tuple(a).__class__ is tuple)
        self.assertEqual(hash(a), hash((1,2,3,4,5)))
        self.assertTrue(a[:].__class__ is tuple)
        self.assertTrue((a * 1).__class__ is tuple)
        self.assertTrue((a * 0).__class__ is tuple)
        self.assertTrue((a + ()).__class__ is tuple)
        a = madtuple(())
        self.assertEqual(tuple(a), ())
        self.assertTrue(tuple(a).__class__ is tuple)
        self.assertTrue((a + a).__class__ is tuple)
        self.assertTrue((a * 0).__class__ is tuple)
        self.assertTrue((a * 1).__class__ is tuple)
        self.assertTrue((a * 2).__class__ is tuple)
        self.assertTrue(a[:].__class__ is tuple)

        class madstring(str):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__("".join(L))
                return self._rev
        s = madstring("abcdefghijklmnopqrstuvwxyz")
        self.assertEqual(s, "abcdefghijklmnopqrstuvwxyz")
        self.assertEqual(s.rev(), madstring("zyxwvutsrqponmlkjihgfedcba"))
        self.assertEqual(s.rev().rev(), madstring("abcdefghijklmnopqrstuvwxyz"))
        for i in range(256):
            s = madstring("".join(map(chr, range(i))))
            t = s.rev()
            u = t.rev()
            self.assertEqual(u, s)
        s = madstring("12345")
        self.assertEqual(str(s), "12345")
        self.assertTrue(str(s).__class__ is str)

        base = "\x00" * 5
        s = madstring(base)
        self.assertEqual(s, base)
        self.assertEqual(str(s), base)
        self.assertTrue(str(s).__class__ is str)
        self.assertEqual(hash(s), hash(base))
        self.assertEqual({s: 1}[base], 1)
        self.assertEqual({base: 1}[s], 1)
        self.assertTrue((s + "").__class__ is str)
        self.assertEqual(s + "", base)
        self.assertTrue(("" + s).__class__ is str)
        self.assertEqual("" + s, base)
        self.assertTrue((s * 0).__class__ is str)
        self.assertEqual(s * 0, "")
        self.assertTrue((s * 1).__class__ is str)
        self.assertEqual(s * 1, base)
        self.assertTrue((s * 2).__class__ is str)
        self.assertEqual(s * 2, base + base)
        self.assertTrue(s[:].__class__ is str)
        self.assertEqual(s[:], base)
        self.assertTrue(s[0:0].__class__ is str)
        self.assertEqual(s[0:0], "")
        self.assertTrue(s.strip().__class__ is str)
        self.assertEqual(s.strip(), base)
        self.assertTrue(s.lstrip().__class__ is str)
        self.assertEqual(s.lstrip(), base)
        self.assertTrue(s.rstrip().__class__ is str)
        self.assertEqual(s.rstrip(), base)
        identitytab = {}
        self.assertTrue(s.translate(identitytab).__class__ is str)
        self.assertEqual(s.translate(identitytab), base)
        self.assertTrue(s.replace("x", "x").__class__ is str)
        self.assertEqual(s.replace("x", "x"), base)
        self.assertTrue(s.ljust(len(s)).__class__ is str)
        self.assertEqual(s.ljust(len(s)), base)
        self.assertTrue(s.rjust(len(s)).__class__ is str)
        self.assertEqual(s.rjust(len(s)), base)
        self.assertTrue(s.center(len(s)).__class__ is str)
        self.assertEqual(s.center(len(s)), base)
        self.assertTrue(s.lower().__class__ is str)
        self.assertEqual(s.lower(), base)

        class madunicode(str):
            _rev = None
            def rev(self):
                if self._rev is not None:
                    return self._rev
                L = list(self)
                L.reverse()
                self._rev = self.__class__("".join(L))
                return self._rev
        u = madunicode("ABCDEF")
        self.assertEqual(u, "ABCDEF")
        self.assertEqual(u.rev(), madunicode("FEDCBA"))
        self.assertEqual(u.rev().rev(), madunicode("ABCDEF"))
        base = "12345"
        u = madunicode(base)
        self.assertEqual(str(u), base)
        self.assertTrue(str(u).__class__ is str)
        self.assertEqual(hash(u), hash(base))
        self.assertEqual({u: 1}[base], 1)
        self.assertEqual({base: 1}[u], 1)
        self.assertTrue(u.strip().__class__ is str)
        self.assertEqual(u.strip(), base)
        self.assertTrue(u.lstrip().__class__ is str)
        self.assertEqual(u.lstrip(), base)
        self.assertTrue(u.rstrip().__class__ is str)
        self.assertEqual(u.rstrip(), base)
        self.assertTrue(u.replace("x", "x").__class__ is str)
        self.assertEqual(u.replace("x", "x"), base)
        self.assertTrue(u.replace("xy", "xy").__class__ is str)
        self.assertEqual(u.replace("xy", "xy"), base)
        self.assertTrue(u.center(len(u)).__class__ is str)
        self.assertEqual(u.center(len(u)), base)
        self.assertTrue(u.ljust(len(u)).__class__ is str)
        self.assertEqual(u.ljust(len(u)), base)
        self.assertTrue(u.rjust(len(u)).__class__ is str)
        self.assertEqual(u.rjust(len(u)), base)
        self.assertTrue(u.lower().__class__ is str)
        self.assertEqual(u.lower(), base)
        self.assertTrue(u.upper().__class__ is str)
        self.assertEqual(u.upper(), base)
        self.assertTrue(u.capitalize().__class__ is str)
        self.assertEqual(u.capitalize(), base)
        self.assertTrue(u.title().__class__ is str)
        self.assertEqual(u.title(), base)
        self.assertTrue((u + "").__class__ is str)
        self.assertEqual(u + "", base)
        self.assertTrue(("" + u).__class__ is str)
        self.assertEqual("" + u, base)
        self.assertTrue((u * 0).__class__ is str)
        self.assertEqual(u * 0, "")
        self.assertTrue((u * 1).__class__ is str)
        self.assertEqual(u * 1, base)
        self.assertTrue((u * 2).__class__ is str)
        self.assertEqual(u * 2, base + base)
        self.assertTrue(u[:].__class__ is str)
        self.assertEqual(u[:], base)
        self.assertTrue(u[0:0].__class__ is str)
        self.assertEqual(u[0:0], "")

        class sublist(list):
            pass
        a = sublist(range(5))
        self.assertEqual(a, list(range(5)))
        a.append("hello")
        self.assertEqual(a, list(range(5)) + ["hello"])
        a[5] = 5
        self.assertEqual(a, list(range(6)))
        a.extend(range(6, 20))
        self.assertEqual(a, list(range(20)))
        a[-5:] = []
        self.assertEqual(a, list(range(15)))
        del a[10:15]
        self.assertEqual(len(a), 10)
        self.assertEqual(a, list(range(10)))
        self.assertEqual(list(a), list(range(10)))
        self.assertEqual(a[0], 0)
        self.assertEqual(a[9], 9)
        self.assertEqual(a[-10], 0)
        self.assertEqual(a[-1], 9)
        self.assertEqual(a[:5], list(range(5)))

        # NOTE(review): a large commented-out Python 2 demo subclassing the
        # removed builtin 'file' type ("CountedInput") previously lived here;
        # it is unportable to Python 3 and has been dropped.
def test_keywords(self):
# Testing keyword args to basic type constructors ...
self.assertEqual(int(x=1), 1)
self.assertEqual(float(x=2), 2.0)
self.assertEqual(int(x=3), 3)
self.assertEqual(complex(imag=42, real=666), complex(666, 42))
self.assertEqual(str(object=500), '500')
self.assertEqual(str(object=b'abc', errors='strict'), 'abc')
self.assertEqual(tuple(sequence=range(3)), (0, 1, 2))
self.assertEqual(list(sequence=(0, 1, 2)), list(range(3)))
# note: as of Python 2.3, dict() no longer has an "items" keyword arg
for constructor in (int, float, int, complex, str, str,
tuple, list):
try:
constructor(bogus_keyword_arg=1)
except TypeError:
pass
else:
self.fail("expected TypeError from bogus keyword argument to %r"
% constructor)
    def test_str_subclass_as_dict_key(self):
        # Testing a str subclass used as dict key ...
        class cistr(str):
            """Subclass of str that computes __eq__ case-insensitively.

            Also computes a hash code of the string in canonical form.
            """

            def __init__(self, value):
                # str is immutable, so the value itself is set by __new__;
                # here we only cache the canonical (lowercased) form.
                self.canonical = value.lower()
                self.hashcode = hash(self.canonical)

            def __eq__(self, other):
                if not isinstance(other, cistr):
                    other = cistr(other)
                return self.canonical == other.canonical

            def __hash__(self):
                return self.hashcode

        self.assertEqual(cistr('ABC'), 'abc')
        self.assertEqual('aBc', cistr('ABC'))
        self.assertEqual(str(cistr('ABC')), 'ABC')

        # Dict lookups go through the case-insensitive hash/eq.
        d = {cistr('one'): 1, cistr('two'): 2, cistr('tHree'): 3}
        self.assertEqual(d[cistr('one')], 1)
        self.assertEqual(d[cistr('tWo')], 2)
        self.assertEqual(d[cistr('THrEE')], 3)
        self.assertIn(cistr('ONe'), d)
        self.assertEqual(d.get(cistr('thrEE')), 3)
def test_classic_comparisons(self):
# Testing classic comparisons...
class classic:
pass
for base in (classic, int, object):
class C(base):
def __init__(self, value):
self.value = int(value)
def __eq__(self, other):
if isinstance(other, C):
return self.value == other.value
if isinstance(other, int) or isinstance(other, int):
return self.value == other
return NotImplemented
def __ne__(self, other):
if isinstance(other, C):
return self.value != other.value
if isinstance(other, int) or isinstance(other, int):
return self.value != other
return NotImplemented
def __lt__(self, other):
if isinstance(other, C):
return self.value < other.value
if isinstance(other, int) or isinstance(other, int):
return self.value < other
return NotImplemented
def __le__(self, other):
if isinstance(other, C):
return self.value <= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value <= other
return NotImplemented
def __gt__(self, other):
if isinstance(other, C):
return self.value > other.value
if isinstance(other, int) or isinstance(other, int):
return self.value > other
return NotImplemented
def __ge__(self, other):
if isinstance(other, C):
return self.value >= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value >= other
return NotImplemented
c1 = C(1)
c2 = C(2)
c3 = C(3)
self.assertEqual(c1, 1)
c = {1: c1, 2: c2, 3: c3}
for x in 1, 2, 3:
for y in 1, 2, 3:
for op in "<", "<=", "==", "!=", ">", ">=":
self.assertTrue(eval("c[x] %s c[y]" % op) ==
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("c[x] %s y" % op) ==
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("x %s c[y]" % op) ==
eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
def test_rich_comparisons(self):
# Testing rich comparisons...
class Z(complex):
pass
z = Z(1)
self.assertEqual(z, 1+0j)
self.assertEqual(1+0j, z)
class ZZ(complex):
def __eq__(self, other):
try:
return abs(self - other) <= 1e-6
except:
return NotImplemented
zz = ZZ(1.0000003)
self.assertEqual(zz, 1+0j)
self.assertEqual(1+0j, zz)
class classic:
pass
for base in (classic, int, object, list):
class C(base):
def __init__(self, value):
self.value = int(value)
def __cmp__(self_, other):
self.fail("shouldn't call __cmp__")
def __eq__(self, other):
if isinstance(other, C):
return self.value == other.value
if isinstance(other, int) or isinstance(other, int):
return self.value == other
return NotImplemented
def __ne__(self, other):
if isinstance(other, C):
return self.value != other.value
if isinstance(other, int) or isinstance(other, int):
return self.value != other
return NotImplemented
def __lt__(self, other):
if isinstance(other, C):
return self.value < other.value
if isinstance(other, int) or isinstance(other, int):
return self.value < other
return NotImplemented
def __le__(self, other):
if isinstance(other, C):
return self.value <= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value <= other
return NotImplemented
def __gt__(self, other):
if isinstance(other, C):
return self.value > other.value
if isinstance(other, int) or isinstance(other, int):
return self.value > other
return NotImplemented
def __ge__(self, other):
if isinstance(other, C):
return self.value >= other.value
if isinstance(other, int) or isinstance(other, int):
return self.value >= other
return NotImplemented
c1 = C(1)
c2 = C(2)
c3 = C(3)
self.assertEqual(c1, 1)
c = {1: c1, 2: c2, 3: c3}
for x in 1, 2, 3:
for y in 1, 2, 3:
for op in "<", "<=", "==", "!=", ">", ">=":
self.assertTrue(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("c[x] %s y" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
self.assertTrue(eval("x %s c[y]" % op) == eval("x %s y" % op),
"x=%d, y=%d" % (x, y))
    def test_descrdoc(self):
        # Testing descriptor doc strings...
        # Getset and member descriptors defined in C expose their doc
        # strings via __doc__ like Python-level descriptors do.
        # NOTE(review): the exact doc text is CPython-version-specific.
        from _io import FileIO
        def check(descr, what):
            self.assertEqual(descr.__doc__, what)
        check(FileIO.closed, "True if the file is closed") # getset descriptor
        check(complex.real, "the real part of a complex number") # member descriptor
    def test_doc_descriptor(self):
        # Testing __doc__ descriptor...
        # SF bug 542984
        # __doc__ may itself be a descriptor; its __get__ receives the
        # instance (or None for class access) and the owner type.
        class DocDescr(object):
            def __get__(self, object, otype):
                if object:
                    object = object.__class__.__name__ + ' instance'
                if otype:
                    otype = otype.__name__
                return 'object=%s; type=%s' % (object, otype)
        class OldClass:
            __doc__ = DocDescr()
        class NewClass(object):
            __doc__ = DocDescr()
        self.assertEqual(OldClass.__doc__, 'object=None; type=OldClass')
        self.assertEqual(OldClass().__doc__, 'object=OldClass instance; type=OldClass')
        self.assertEqual(NewClass.__doc__, 'object=None; type=NewClass')
        self.assertEqual(NewClass().__doc__, 'object=NewClass instance; type=NewClass')
    def test_set_class(self):
        # Testing __class__ assignment...
        # Instances of classes with identical layouts may have their
        # __class__ reassigned; incompatible layouts must be rejected.
        class C(object): pass
        class D(object): pass
        class E(object): pass
        class F(D, E): pass
        for cls in C, D, E, F:
            for cls2 in C, D, E, F:
                x = cls()
                x.__class__ = cls2
                self.assertTrue(x.__class__ is cls2)
                x.__class__ = cls
                self.assertTrue(x.__class__ is cls)
        # cant(x, C): assert that "x.__class__ = C" raises TypeError and that
        # __class__ cannot be deleted from x.
        def cant(x, C):
            try:
                x.__class__ = C
            except TypeError:
                pass
            else:
                self.fail("shouldn't allow %r.__class__ = %r" % (x, C))
            try:
                delattr(x, "__class__")
            except (TypeError, AttributeError):
                pass
            else:
                self.fail("shouldn't allow del %r.__class__" % x)
        cant(C(), list)
        cant(list(), C)
        cant(C(), 1)
        cant(C(), object)
        cant(object(), list)
        cant(list(), object)
        class Int(int): __slots__ = []
        cant(2, Int)
        cant(Int(), int)
        cant(True, int)
        cant(2, bool)
        o = object()
        cant(o, type(1))
        cant(o, type(None))
        del o
        # __class__ assignment between slotted classes: allowed only when the
        # slot layouts are compatible (same names/positions), as for G/H/I.
        class G(object):
            __slots__ = ["a", "b"]
        class H(object):
            __slots__ = ["b", "a"]
        class I(object):
            __slots__ = ["a", "b"]
        class J(object):
            __slots__ = ["c", "b"]
        class K(object):
            __slots__ = ["a", "b", "d"]
        class L(H):
            __slots__ = ["e"]
        class M(I):
            __slots__ = ["e"]
        class N(J):
            __slots__ = ["__weakref__"]
        class P(J):
            __slots__ = ["__dict__"]
        class Q(J):
            pass
        class R(J):
            __slots__ = ["__dict__", "__weakref__"]
        for cls, cls2 in ((G, H), (G, I), (I, H), (Q, R), (R, Q)):
            x = cls()
            x.a = 1
            x.__class__ = cls2
            self.assertTrue(x.__class__ is cls2,
                   "assigning %r as __class__ for %r silently failed" % (cls2, x))
            self.assertEqual(x.a, 1)
            x.__class__ = cls
            self.assertTrue(x.__class__ is cls,
                   "assigning %r as __class__ for %r silently failed" % (cls, x))
            self.assertEqual(x.a, 1)
        for cls in G, J, K, L, M, N, P, R, list, Int:
            for cls2 in G, J, K, L, M, N, P, R, list, Int:
                if cls is cls2:
                    continue
                cant(cls(), cls2)
        # Issue5283: when __class__ changes in __del__, the wrong
        # type gets DECREF'd.
        class O(object):
            pass
        class A(object):
            def __del__(self):
                self.__class__ = O
        l = [A() for x in range(100)]
        del l
    def test_set_dict(self):
        # Testing __dict__ assignment...
        class C(object): pass
        a = C()
        a.__dict__ = {'b': 1}
        self.assertEqual(a.b, 1)
        # cant(x, dict): assert that "x.__dict__ = dict" is rejected.
        def cant(x, dict):
            try:
                x.__dict__ = dict
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("shouldn't allow %r.__dict__ = %r" % (x, dict))
        # Only real dicts may be assigned.
        cant(a, None)
        cant(a, [])
        cant(a, 1)
        del a.__dict__ # Deleting __dict__ is allowed
        class Base(object):
            pass
        def verify_dict_readonly(x):
            """
            x has to be an instance of a class inheriting from Base.
            """
            cant(x, {})
            try:
                del x.__dict__
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("shouldn't allow del %r.__dict__" % x)
            # The __dict__ descriptor itself must also refuse to set.
            dict_descr = Base.__dict__["__dict__"]
            try:
                dict_descr.__set__(x, {})
            except (AttributeError, TypeError):
                pass
            else:
                self.fail("dict_descr allowed access to %r's dict" % x)
        # Classes don't allow __dict__ assignment and have readonly dicts
        class Meta1(type, Base):
            pass
        class Meta2(Base, type):
            pass
        class D(object, metaclass=Meta1):
            pass
        class E(object, metaclass=Meta2):
            pass
        for cls in C, D, E:
            verify_dict_readonly(cls)
            class_dict = cls.__dict__
            try:
                class_dict["spam"] = "eggs"
            except TypeError:
                pass
            else:
                self.fail("%r's __dict__ can be modified" % cls)
        # Modules also disallow __dict__ assignment
        class Module1(types.ModuleType, Base):
            pass
        class Module2(Base, types.ModuleType):
            pass
        for ModuleType in Module1, Module2:
            mod = ModuleType("spam")
            verify_dict_readonly(mod)
            # ...but the module dict's contents remain writable.
            mod.__dict__["spam"] = "eggs"
        # Exception's __dict__ can be replaced, but not deleted
        # (at least not any more than regular exception's __dict__ can
        # be deleted; on CPython it is not the case, whereas on PyPy they
        # can, just like any other new-style instance's __dict__.)
        def can_delete_dict(e):
            try:
                del e.__dict__
            except (TypeError, AttributeError):
                return False
            else:
                return True
        class Exception1(Exception, Base):
            pass
        class Exception2(Base, Exception):
            pass
        for ExceptionType in Exception, Exception1, Exception2:
            e = ExceptionType()
            e.__dict__ = {"a": 1}
            self.assertEqual(e.a, 1)
            self.assertEqual(can_delete_dict(e), can_delete_dict(ValueError()))
    def test_pickles(self):
        # Testing pickling and copying new-style classes and objects...
        import pickle
        def sorteditems(d):
            L = list(d.items())
            L.sort()
            return L
        # The classes are made global so pickle can find them by
        # qualified name when serializing by reference.
        global C
        class C(object):
            def __init__(self, a, b):
                super(C, self).__init__()
                self.a = a
                self.b = b
            def __repr__(self):
                return "C(%r, %r)" % (self.a, self.b)
        global C1
        class C1(list):
            # Subclass with __getnewargs__: pickle must call __new__ with
            # the returned (a, b) on unpickling.
            def __new__(cls, a, b):
                return super(C1, cls).__new__(cls)
            def __getnewargs__(self):
                return (self.a, self.b)
            def __init__(self, a, b):
                self.a = a
                self.b = b
            def __repr__(self):
                return "C1(%r, %r)<%r>" % (self.a, self.b, list(self))
        global C2
        class C2(int):
            def __new__(cls, a, b, val=0):
                return super(C2, cls).__new__(cls, val)
            def __getnewargs__(self):
                return (self.a, self.b, int(self))
            def __init__(self, a, b, val=0):
                self.a = a
                self.b = b
            def __repr__(self):
                return "C2(%r, %r)<%r>" % (self.a, self.b, int(self))
        global C3
        class C3(object):
            # Explicit __getstate__/__setstate__ pair.
            def __init__(self, foo):
                self.foo = foo
            def __getstate__(self):
                return self.foo
            def __setstate__(self, foo):
                self.foo = foo
        global C4classic, C4
        class C4classic: # classic
            pass
        class C4(C4classic, object): # mixed inheritance
            pass
        # bin selects pickle protocol 0 (text) then 1 (binary).
        for bin in 0, 1:
            for cls in C, C1, C2:
                s = pickle.dumps(cls, bin)
                cls2 = pickle.loads(s)
                self.assertTrue(cls2 is cls)
            a = C1(1, 2); a.append(42); a.append(24)
            b = C2("hello", "world", 42)
            s = pickle.dumps((a, b), bin)
            x, y = pickle.loads(s)
            self.assertEqual(x.__class__, a.__class__)
            self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__))
            self.assertEqual(y.__class__, b.__class__)
            self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__))
            self.assertEqual(repr(x), repr(a))
            self.assertEqual(repr(y), repr(b))
            # Test for __getstate__ and __setstate__ on new style class
            u = C3(42)
            s = pickle.dumps(u, bin)
            v = pickle.loads(s)
            self.assertEqual(u.__class__, v.__class__)
            self.assertEqual(u.foo, v.foo)
            # Test for picklability of hybrid class
            u = C4()
            u.foo = 42
            s = pickle.dumps(u, bin)
            v = pickle.loads(s)
            self.assertEqual(u.__class__, v.__class__)
            self.assertEqual(u.foo, v.foo)
        # Testing copy.deepcopy()
        import copy
        for cls in C, C1, C2:
            cls2 = copy.deepcopy(cls)
            self.assertTrue(cls2 is cls)
        a = C1(1, 2); a.append(42); a.append(24)
        b = C2("hello", "world", 42)
        x, y = copy.deepcopy((a, b))
        self.assertEqual(x.__class__, a.__class__)
        self.assertEqual(sorteditems(x.__dict__), sorteditems(a.__dict__))
        self.assertEqual(y.__class__, b.__class__)
        self.assertEqual(sorteditems(y.__dict__), sorteditems(b.__dict__))
        self.assertEqual(repr(x), repr(a))
        self.assertEqual(repr(y), repr(b))
def test_pickle_slots(self):
# Testing pickling of classes with __slots__ ...
import pickle
# Pickling of classes with __slots__ but without __getstate__ should fail
# (if using protocol 0 or 1)
global B, C, D, E
class B(object):
pass
for base in [object, B]:
class C(base):
__slots__ = ['a']
class D(C):
pass
try:
pickle.dumps(C(), 0)
except TypeError:
pass
else:
self.fail("should fail: pickle C instance - %s" % base)
try:
pickle.dumps(C(), 0)
except TypeError:
pass
else:
self.fail("should fail: pickle D instance - %s" % base)
# Give C a nice generic __getstate__ and __setstate__
class C(base):
__slots__ = ['a']
def __getstate__(self):
try:
d = self.__dict__.copy()
except AttributeError:
d = {}
for cls in self.__class__.__mro__:
for sn in cls.__dict__.get('__slots__', ()):
try:
d[sn] = getattr(self, sn)
except AttributeError:
pass
return d
def __setstate__(self, d):
for k, v in list(d.items()):
setattr(self, k, v)
class D(C):
pass
# Now it should work
x = C()
y = pickle.loads(pickle.dumps(x))
self.assertEqual(hasattr(y, 'a'), 0)
x.a = 42
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a, 42)
x = D()
x.a = 42
x.b = 100
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a + y.b, 142)
# A subclass that adds a slot should also work
class E(C):
__slots__ = ['b']
x = E()
x.a = 42
x.b = "foo"
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a, x.a)
self.assertEqual(y.b, x.b)
    def test_binary_operator_override(self):
        # Testing overrides of binary operations...
        class I(int):
            def __repr__(self):
                return "I(%r)" % int(self)
            def __add__(self, other):
                return I(int(self) + int(other))
            __radd__ = __add__
            # __pow__/__rpow__ take the optional third (modulus) argument
            # so three-argument pow() dispatches here too.
            def __pow__(self, other, mod=None):
                if mod is None:
                    return I(pow(int(self), int(other)))
                else:
                    return I(pow(int(self), int(other), int(mod)))
            def __rpow__(self, other, mod=None):
                if mod is None:
                    return I(pow(int(other), int(self), mod))
                else:
                    return I(pow(int(other), int(self), int(mod)))
        self.assertEqual(repr(I(1) + I(2)), "I(3)")
        self.assertEqual(repr(I(1) + 2), "I(3)")
        self.assertEqual(repr(1 + I(2)), "I(3)")
        self.assertEqual(repr(I(2) ** I(3)), "I(8)")
        self.assertEqual(repr(2 ** I(3)), "I(8)")
        self.assertEqual(repr(I(2) ** 3), "I(8)")
        self.assertEqual(repr(pow(I(2), I(3), I(5))), "I(3)")
        # NOTE(review): S is not used further; defining it just checks that a
        # str subclass may override __eq__ without error.
        class S(str):
            def __eq__(self, other):
                return self.lower() == other.lower()
    def test_subclass_propagation(self):
        # Testing propagation of slot functions to subclasses...
        # Diamond D(B, C) <- B, C <- A; updating a slot method such as
        # __hash__ on any base must immediately affect D instances.
        class A(object):
            pass
        class B(A):
            pass
        class C(A):
            pass
        class D(B, C):
            pass
        d = D()
        orig_hash = hash(d) # related to id(d) in platform-dependent ways
        A.__hash__ = lambda self: 42
        self.assertEqual(hash(d), 42)
        C.__hash__ = lambda self: 314
        self.assertEqual(hash(d), 314)
        B.__hash__ = lambda self: 144
        self.assertEqual(hash(d), 144)
        D.__hash__ = lambda self: 100
        self.assertEqual(hash(d), 100)
        # Setting __hash__ = None makes the type unhashable; deleting it
        # falls back to the next class in the MRO (D -> B -> C -> A).
        D.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del D.__hash__
        self.assertEqual(hash(d), 144)
        B.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del B.__hash__
        self.assertEqual(hash(d), 314)
        C.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del C.__hash__
        self.assertEqual(hash(d), 42)
        A.__hash__ = None
        self.assertRaises(TypeError, hash, d)
        del A.__hash__
        self.assertEqual(hash(d), orig_hash)
        d.foo = 42
        d.bar = 42
        self.assertEqual(d.foo, 42)
        self.assertEqual(d.bar, 42)
        # __getattribute__ intercepts every lookup; here it shadows the
        # instance attribute "foo" while leaving "bar" alone.
        def __getattribute__(self, name):
            if name == "foo":
                return 24
            return object.__getattribute__(self, name)
        A.__getattribute__ = __getattribute__
        self.assertEqual(d.foo, 24)
        self.assertEqual(d.bar, 42)
        # __getattr__ is consulted only when normal lookup fails.
        def __getattr__(self, name):
            if name in ("spam", "foo", "bar"):
                return "hello"
            raise AttributeError(name)
        B.__getattr__ = __getattr__
        self.assertEqual(d.spam, "hello")
        self.assertEqual(d.foo, 24)
        self.assertEqual(d.bar, 42)
        del A.__getattribute__
        self.assertEqual(d.foo, 42)
        del d.foo
        self.assertEqual(d.foo, "hello")
        self.assertEqual(d.bar, 42)
        del B.__getattr__
        try:
            d.foo
        except AttributeError:
            pass
        else:
            self.fail("d.foo should be undefined now")
        # Test a nasty bug in recurse_down_subclasses()
        class A(object):
            pass
        class B(A):
            pass
        del B
        support.gc_collect()
        A.__setitem__ = lambda *a: None # crash
    def test_buffer_inheritance(self):
        # Testing that buffer interface is inherited ...
        import binascii
        # SF bug [#470040] ParseTuple t# vs subclasses.
        class MyBytes(bytes):
            pass
        base = b'abc'
        m = MyBytes(base)
        # b2a_hex uses the buffer interface to get its argument's value, via
        # PyArg_ParseTuple 't#' code.
        self.assertEqual(binascii.b2a_hex(m), binascii.b2a_hex(base))
        # Conversely, an int subclass must NOT grow a buffer interface.
        class MyInt(int):
            pass
        m = MyInt(42)
        try:
            binascii.b2a_hex(m)
            self.fail('subclass of int should not have a buffer interface')
        except TypeError:
            pass
    def test_str_of_str_subclass(self):
        # Testing __str__ defined in subclass of str ...
        import binascii
        import io
        class octetstring(str):
            # str() of an instance yields its hex encoding; repr() appends
            # " repr" to the raw string.
            def __str__(self):
                return binascii.b2a_hex(self.encode('ascii')).decode("ascii")
            def __repr__(self):
                return self + " repr"
        o = octetstring('A')
        self.assertEqual(type(o), octetstring)
        # str()/repr() of a str subclass must return plain str.
        self.assertEqual(type(str(o)), str)
        self.assertEqual(type(repr(o)), str)
        self.assertEqual(ord(o), 0x41)
        self.assertEqual(str(o), '41')
        self.assertEqual(repr(o), 'A repr')
        self.assertEqual(o.__str__(), '41')
        self.assertEqual(o.__repr__(), 'A repr')
        capture = io.StringIO()
        # Calling str() or not exercises different internal paths.
        print(o, file=capture)
        print(str(o), file=capture)
        self.assertEqual(capture.getvalue(), '41\n41\n')
        capture.close()
def test_keyword_arguments(self):
# Testing keyword arguments to __init__, __call__...
def f(a): return a
self.assertEqual(f.__call__(a=42), 42)
a = []
list.__init__(a, sequence=[0, 1, 2])
self.assertEqual(a, [0, 1, 2])
    def test_recursive_call(self):
        # Testing recursive __call__() by setting to instance of class...
        # A.__call__ is an A instance, so calling A() recurses through
        # __call__ forever; the recursion limit must stop it.
        class A(object):
            pass
        A.__call__ = A()
        try:
            A()()
        except RuntimeError:
            # NOTE(review): RecursionError (a RuntimeError subclass) is the
            # expected failure on modern CPython.
            pass
        else:
            self.fail("Recursion limit should have been reached for __call__()")
    def test_delete_hook(self):
        # Testing __del__ hook...
        log = []
        class C(object):
            def __del__(self):
                log.append(1)
        c = C()
        self.assertEqual(log, [])
        del c
        # gc_collect() so the test also passes on implementations without
        # immediate refcount-based finalization.
        support.gc_collect()
        self.assertEqual(log, [1])
        # "del d[0]" is item deletion, not object deletion; a class without
        # __delitem__ must raise TypeError.
        class D(object): pass
        d = D()
        try: del d[0]
        except TypeError: pass
        else: self.fail("invalid del() didn't raise TypeError")
def test_hash_inheritance(self):
# Testing hash of mutable subclasses...
class mydict(dict):
pass
d = mydict()
try:
hash(d)
except TypeError:
pass
else:
self.fail("hash() of dict subclass should fail")
class mylist(list):
pass
d = mylist()
try:
hash(d)
except TypeError:
pass
else:
self.fail("hash() of list subclass should fail")
def test_str_operations(self):
try: 'a' + 5
except TypeError: pass
else: self.fail("'' + 5 doesn't raise TypeError")
try: ''.split('')
except ValueError: pass
else: self.fail("''.split('') doesn't raise ValueError")
try: ''.join([0])
except TypeError: pass
else: self.fail("''.join([0]) doesn't raise TypeError")
try: ''.rindex('5')
except ValueError: pass
else: self.fail("''.rindex('5') doesn't raise ValueError")
try: '%(n)s' % None
except TypeError: pass
else: self.fail("'%(n)s' % None doesn't raise TypeError")
try: '%(n' % {}
except ValueError: pass
else: self.fail("'%(n' % {} '' doesn't raise ValueError")
try: '%*s' % ('abc')
except TypeError: pass
else: self.fail("'%*s' % ('abc') doesn't raise TypeError")
try: '%*.*s' % ('abc', 5)
except TypeError: pass
else: self.fail("'%*.*s' % ('abc', 5) doesn't raise TypeError")
try: '%s' % (1, 2)
except TypeError: pass
else: self.fail("'%s' % (1, 2) doesn't raise TypeError")
try: '%' % None
except ValueError: pass
else: self.fail("'%' % None doesn't raise ValueError")
self.assertEqual('534253'.isdigit(), 1)
self.assertEqual('534253x'.isdigit(), 0)
self.assertEqual('%c' % 5, '\x05')
self.assertEqual('%c' % '5', '5')
def test_deepcopy_recursive(self):
# Testing deepcopy of recursive objects...
class Node:
pass
a = Node()
b = Node()
a.b = b
b.a = a
z = deepcopy(a) # This blew up before
    def test_unintialized_modules(self):
        # Testing uninitialized module objects...
        # NOTE(review): the method name misspells "uninitialized"; kept as-is
        # since renaming would change the test identifier.
        from types import ModuleType as M
        # M.__new__ creates a module without running __init__, so none of the
        # usual attributes exist yet, but str() must not crash.
        m = M.__new__(M)
        str(m)
        self.assertEqual(hasattr(m, "__name__"), 0)
        self.assertEqual(hasattr(m, "__file__"), 0)
        self.assertEqual(hasattr(m, "foo"), 0)
        self.assertFalse(m.__dict__)   # None or {} are both reasonable answers
        m.foo = 1
        self.assertEqual(m.__dict__, {"foo": 1})
    def test_funny_new(self):
        # Testing __new__ returning something unexpected...
        # When __new__ returns an object that is not an instance of cls,
        # __init__ is not called on it (hence C("1") is a plain list below).
        class C(object):
            def __new__(cls, arg):
                if isinstance(arg, str): return [1, 2, 3]
                elif isinstance(arg, int): return object.__new__(D)
                else: return object.__new__(cls)
        class D(C):
            def __init__(self, arg):
                self.foo = arg
        self.assertEqual(C("1"), [1, 2, 3])
        self.assertEqual(D("1"), [1, 2, 3])
        d = D(None)
        self.assertEqual(d.foo, None)
        # C(1) returns a D instance, so D.__init__ runs and sets foo.
        d = C(1)
        self.assertIsInstance(d, D)
        self.assertEqual(d.foo, 1)
        d = D(1)
        self.assertIsInstance(d, D)
        self.assertEqual(d.foo, 1)
    def test_imul_bug(self):
        # Testing for __imul__ problems...
        # SF bug 544647
        # __imul__ must receive the right-hand operand unconverted, whatever
        # its type (float, int, large int, None, str).
        class C(object):
            def __imul__(self, other):
                return (self, other)
        x = C()
        y = x
        y *= 1.0
        self.assertEqual(y, (x, 1.0))
        y = x
        y *= 2
        self.assertEqual(y, (x, 2))
        y = x
        y *= 3
        self.assertEqual(y, (x, 3))
        y = x
        y *= 1<<100
        self.assertEqual(y, (x, 1<<100))
        y = x
        y *= None
        self.assertEqual(y, (x, None))
        y = x
        y *= "foo"
        self.assertEqual(y, (x, "foo"))
    def test_copy_setstate(self):
        # Testing that copy.*copy() correctly uses __setstate__...
        import copy
        class C(object):
            def __init__(self, foo=None):
                self.foo = foo
                self.__foo = foo
            def setfoo(self, foo=None):
                self.foo = foo
            def getfoo(self):
                return self.__foo
            def __getstate__(self):
                return [self.foo]
            # First parameter is deliberately named self_ so that the
            # closed-over "self" still refers to the test case, letting the
            # assertion run inside __setstate__.
            def __setstate__(self_, lst):
                self.assertEqual(len(lst), 1)
                self_.__foo = self_.foo = lst[0]
        a = C(42)
        a.setfoo(24)
        self.assertEqual(a.foo, 24)
        self.assertEqual(a.getfoo(), 42)
        # Copies go through __getstate__/__setstate__, so both attributes of
        # the copy end up as 24.
        b = copy.copy(a)
        self.assertEqual(b.foo, 24)
        self.assertEqual(b.getfoo(), 24)
        b = copy.deepcopy(a)
        self.assertEqual(b.foo, 24)
        self.assertEqual(b.getfoo(), 24)
    def test_slices(self):
        # Testing cases with slices and overridden __getitem__ ...
        # For str, tuple and list: s[:n], s[slice(n)], and the explicit
        # __getitem__ call must all agree, both on the builtin and on a
        # subclass that overrides __getitem__.
        # Strings
        self.assertEqual("hello"[:4], "hell")
        self.assertEqual("hello"[slice(4)], "hell")
        self.assertEqual(str.__getitem__("hello", slice(4)), "hell")
        class S(str):
            def __getitem__(self, x):
                return str.__getitem__(self, x)
        self.assertEqual(S("hello")[:4], "hell")
        self.assertEqual(S("hello")[slice(4)], "hell")
        self.assertEqual(S("hello").__getitem__(slice(4)), "hell")
        # Tuples
        self.assertEqual((1,2,3)[:2], (1,2))
        self.assertEqual((1,2,3)[slice(2)], (1,2))
        self.assertEqual(tuple.__getitem__((1,2,3), slice(2)), (1,2))
        class T(tuple):
            def __getitem__(self, x):
                return tuple.__getitem__(self, x)
        self.assertEqual(T((1,2,3))[:2], (1,2))
        self.assertEqual(T((1,2,3))[slice(2)], (1,2))
        self.assertEqual(T((1,2,3)).__getitem__(slice(2)), (1,2))
        # Lists
        self.assertEqual([1,2,3][:2], [1,2])
        self.assertEqual([1,2,3][slice(2)], [1,2])
        self.assertEqual(list.__getitem__([1,2,3], slice(2)), [1,2])
        class L(list):
            def __getitem__(self, x):
                return list.__getitem__(self, x)
        self.assertEqual(L([1,2,3])[:2], [1,2])
        self.assertEqual(L([1,2,3])[slice(2)], [1,2])
        self.assertEqual(L([1,2,3]).__getitem__(slice(2)), [1,2])
        # Now do lists and __setitem__
        a = L([1,2,3])
        a[slice(1, 3)] = [3,2]
        self.assertEqual(a, [1,3,2])
        a[slice(0, 2, 1)] = [3,1]
        self.assertEqual(a, [3,1,2])
        a.__setitem__(slice(1, 3), [2,1])
        self.assertEqual(a, [3,2,1])
        a.__setitem__(slice(0, 2, 1), [2,3])
        self.assertEqual(a, [2,3,1])
    def test_subtype_resurrection(self):
        # Testing resurrection of new-style instance...
        class C(object):
            container = []
            def __del__(self):
                # resurrect the instance
                C.container.append(self)
        c = C()
        c.attr = 42
        # The most interesting thing here is whether this blows up, due to
        # flawed GC tracking logic in typeobject.c's call_finalizer() (a 2.2.1
        # bug).
        del c
        # If that didn't blow up, it's also interesting to see whether clearing
        # the last container slot works: that will attempt to delete c again,
        # which will cause c to get appended back to the container again
        # "during" the del. (On non-CPython implementations, however, __del__
        # is typically not called again.)
        support.gc_collect()
        self.assertEqual(len(C.container), 1)
        del C.container[-1]
        if support.check_impl_detail():
            support.gc_collect()
            self.assertEqual(len(C.container), 1)
        self.assertEqual(C.container[-1].attr, 42)
        # Make c mortal again, so that the test framework with -l doesn't report
        # it as a leak.
        del C.__del__
    def test_slots_trash(self):
        # Testing slot trash...
        # Deallocating deeply nested slotted trash caused stack overflows
        class trash(object):
            __slots__ = ['x']
            def __init__(self, x):
                self.x = x
        # Build a 50000-deep linked chain; "del o" must tear it down without
        # overflowing the C stack.
        o = None
        for i in range(50000):
            o = trash(o)
        del o
    def test_slots_multiple_inheritance(self):
        # SF bug 575229, multiple inheritance w/ slots dumps core
        # A has empty __slots__, B has a __dict__; C inherits both and must
        # still be fully functional (writable attributes, weakref support).
        class A(object):
            __slots__=()
        class B(object):
            pass
        class C(A,B) :
            __slots__=()
        if support.check_impl_detail():
            self.assertEqual(C.__basicsize__, B.__basicsize__)
        self.assertTrue(hasattr(C, '__dict__'))
        self.assertTrue(hasattr(C, '__weakref__'))
        C().x = 2
def test_rmul(self):
# Testing correct invocation of __rmul__...
# SF patch 592646
class C(object):
def __mul__(self, other):
return "mul"
def __rmul__(self, other):
return "rmul"
a = C()
self.assertEqual(a*2, "mul")
self.assertEqual(a*2.2, "mul")
self.assertEqual(2*a, "rmul")
self.assertEqual(2.2*a, "rmul")
def test_ipow(self):
# Testing correct invocation of __ipow__...
# [SF bug 620179]
class C(object):
def __ipow__(self, other):
pass
a = C()
a **= 2
    def test_mutable_bases(self):
        # Testing mutable bases...
        # stuff that should work:
        class C(object):
            pass
        class C2(object):
            def __getattribute__(self, attr):
                if attr == 'a':
                    return 2
                else:
                    return super(C2, self).__getattribute__(attr)
            def meth(self):
                return 1
        class D(C):
            pass
        class E(D):
            pass
        d = D()
        e = E()
        # Re-point D's base from C to C2; existing instances d and e must
        # pick up C2's methods and __getattribute__ immediately.
        D.__bases__ = (C,)
        D.__bases__ = (C2,)
        self.assertEqual(d.meth(), 1)
        self.assertEqual(e.meth(), 1)
        self.assertEqual(d.a, 2)
        self.assertEqual(e.a, 2)
        self.assertEqual(C2.__subclasses__(), [D])
        try:
            del D.__bases__
        except (TypeError, AttributeError):
            pass
        else:
            self.fail("shouldn't be able to delete .__bases__")
        try:
            D.__bases__ = ()
        except TypeError as msg:
            # An empty bases tuple must be rejected, but with the right
            # error message.
            if str(msg) == "a new-style class can't have only classic bases":
                self.fail("wrong error message for .__bases__ = ()")
        else:
            self.fail("shouldn't be able to set .__bases__ to ()")
        try:
            D.__bases__ = (D,)
        except TypeError:
            pass
        else:
            # actually, we'll have crashed by here...
            self.fail("shouldn't be able to create inheritance cycles")
        try:
            D.__bases__ = (C, C)
        except TypeError:
            pass
        else:
            self.fail("didn't detect repeated base classes")
        try:
            D.__bases__ = (E,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't be able to create inheritance cycles")
    def test_builtin_bases(self):
        # Make sure all the builtin types can have their base queried without
        # segfaulting. See issue #5787.
        builtin_types = [tp for tp in builtins.__dict__.values()
                         if isinstance(tp, type)]
        for tp in builtin_types:
            object.__getattribute__(tp, "__bases__")
            if tp is not object:
                self.assertEqual(len(tp.__bases__), 1, tp)
        # __bases__ reassignment that would change the instance layout must
        # be rejected, both on subclasses and on builtins themselves.
        class L(list):
            pass
        class C(object):
            pass
        class D(C):
            pass
        try:
            L.__bases__ = (dict,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't turn list subclass into dict subclass")
        try:
            list.__bases__ = (dict,)
        except TypeError:
            pass
        else:
            self.fail("shouldn't be able to assign to list.__bases__")
        try:
            D.__bases__ = (C, list)
        except TypeError:
            pass
        else:
            assert 0, "best_base calculation found wanting"
    def test_mutable_bases_with_failing_mro(self):
        # Testing mutable bases with failing mro...
        # WorkOnce's mro() succeeds exactly once, then raises; this lets the
        # test trigger a failure midway through a __bases__ reassignment.
        class WorkOnce(type):
            def __new__(self, name, bases, ns):
                self.flag = 0
                return super(WorkOnce, self).__new__(WorkOnce, name, bases, ns)
            def mro(self):
                if self.flag > 0:
                    raise RuntimeError("bozo")
                else:
                    self.flag += 1
                    return type.mro(self)
        class WorkAlways(type):
            def mro(self):
                # this is here to make sure that .mro()s aren't called
                # with an exception set (which was possible at one point).
                # An error message will be printed in a debug build.
                # What's a good way to test for this?
                return type.mro(self)
        class C(object):
            pass
        class C2(object):
            pass
        class D(C):
            pass
        class E(D):
            pass
        class F(D, metaclass=WorkOnce):
            pass
        class G(D, metaclass=WorkAlways):
            pass
        # Immediate subclasses have their mro's adjusted in alphabetical
        # order, so E's will get adjusted before adjusting F's fails. We
        # check here that E's gets restored.
        E_mro_before = E.__mro__
        D_mro_before = D.__mro__
        try:
            D.__bases__ = (C2,)
        except RuntimeError:
            self.assertEqual(E.__mro__, E_mro_before)
            self.assertEqual(D.__mro__, D_mro_before)
        else:
            self.fail("exception not propagated")
    def test_mutable_bases_catch_mro_conflict(self):
        # Testing mutable bases catch mro conflict...
        # Swapping C's bases to (B, A) contradicts the (A, B) order baked
        # into E's MRO via D, so the assignment must raise TypeError.
        class A(object):
            pass
        class B(object):
            pass
        class C(A, B):
            pass
        class D(A, B):
            pass
        class E(C, D):
            pass
        try:
            C.__bases__ = (B, A)
        except TypeError:
            pass
        else:
            self.fail("didn't catch MRO conflict")
def test_mutable_names(self):
# Testing mutable names...
class C(object):
pass
# C.__module__ could be 'test_descr' or '__main__'
mod = C.__module__
C.__name__ = 'D'
self.assertEqual((C.__module__, C.__name__), (mod, 'D'))
C.__name__ = 'D.E'
self.assertEqual((C.__module__, C.__name__), (mod, 'D.E'))
    def test_subclass_right_op(self):
        # Testing correct dispatch of subclass overloading __r<op>__...
        # This code tests various cases where right-dispatch of a subclass
        # should be preferred over left-dispatch of a base class.
        # Case 1: subclass of int; this tests code in abstract.c::binary_op1()
        class B(int):
            def __floordiv__(self, other):
                return "B.__floordiv__"
            def __rfloordiv__(self, other):
                return "B.__rfloordiv__"
        self.assertEqual(B(1) // 1, "B.__floordiv__")
        self.assertEqual(1 // B(1), "B.__rfloordiv__")
        # Case 2: subclass of object; this is just the baseline for case 3
        class C(object):
            def __floordiv__(self, other):
                return "C.__floordiv__"
            def __rfloordiv__(self, other):
                return "C.__rfloordiv__"
        self.assertEqual(C() // 1, "C.__floordiv__")
        self.assertEqual(1 // C(), "C.__rfloordiv__")
        # Case 3: subclass of new-style class; here it gets interesting
        class D(C):
            def __floordiv__(self, other):
                return "D.__floordiv__"
            def __rfloordiv__(self, other):
                return "D.__rfloordiv__"
        # The D (subclass) reflected method wins over C's forward method.
        self.assertEqual(D() // C(), "D.__floordiv__")
        self.assertEqual(C() // D(), "D.__rfloordiv__")
        # Case 4: this didn't work right in 2.2.2 and 2.3a1
        # E inherits (not overrides) C's methods, so no right-side priority.
        class E(C):
            pass
        self.assertEqual(E.__rfloordiv__, C.__rfloordiv__)
        self.assertEqual(E() // 1, "C.__floordiv__")
        self.assertEqual(1 // E(), "C.__rfloordiv__")
        self.assertEqual(E() // C(), "C.__floordiv__")
        self.assertEqual(C() // E(), "C.__floordiv__") # This one would fail
    @support.impl_detail("testing an internal kind of method object")
    def test_meth_class_get(self):
        # Testing __get__ method of METH_CLASS C methods...
        # Full coverage of descrobject.c::classmethod_get()
        # Baseline
        arg = [1, 2, 3]
        res = {1: None, 2: None, 3: None}
        self.assertEqual(dict.fromkeys(arg), res)
        self.assertEqual({}.fromkeys(arg), res)
        # Now get the descriptor
        descr = dict.__dict__["fromkeys"]
        # More baseline using the descriptor directly
        self.assertEqual(descr.__get__(None, dict)(arg), res)
        self.assertEqual(descr.__get__({})(arg), res)
        # Now check various error cases
        # (wrong owner type, non-type second argument, missing type)
        try:
            descr.__get__(None, None)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, None)")
        try:
            descr.__get__(42)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(42)")
        try:
            descr.__get__(None, 42)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, 42)")
        try:
            descr.__get__(None, int)
        except TypeError:
            pass
        else:
            self.fail("shouldn't have allowed descr.__get__(None, int)")
    def test_isinst_isclass(self):
        # Testing proxy isinstance() and isclass()...
        # Proxy forwards all attribute access (including __class__) to the
        # wrapped object, so isinstance() must see through it.
        class Proxy(object):
            def __init__(self, obj):
                self.__obj = obj
            def __getattribute__(self, name):
                if name.startswith("_Proxy__"):
                    return object.__getattribute__(self, name)
                else:
                    return getattr(self.__obj, name)
        # Test with a classic class
        class C:
            pass
        a = C()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
        # Test with a classic subclass
        class D(C):
            pass
        a = D()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
        # Test with a new-style class
        class C(object):
            pass
        a = C()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
        # Test with a new-style subclass
        class D(C):
            pass
        a = D()
        pa = Proxy(a)
        self.assertIsInstance(a, C)  # Baseline
        self.assertIsInstance(pa, C) # Test
    def test_proxy_super(self):
        # Testing super() for a proxy object...
        class Proxy(object):
            def __init__(self, obj):
                self.__obj = obj
            def __getattribute__(self, name):
                if name.startswith("_Proxy__"):
                    return object.__getattribute__(self, name)
                else:
                    return getattr(self.__obj, name)
        class B(object):
            def f(self):
                return "B.f"
        class C(B):
            def f(self):
                return super(C, self).f() + "->C.f"
        obj = C()
        p = Proxy(obj)
        # Calling C.f unbound with the proxy as self: super(C, self) must
        # accept the proxy because its __class__ forwards to C.
        self.assertEqual(C.__dict__["f"](p), "B.f->C.f")
    def test_carloverre(self):
        # Testing prohibition of Carlo Verre's hack...
        # object.__setattr__/__delattr__ must refuse to bypass the
        # immutability of builtin types such as str.
        try:
            object.__setattr__(str, "foo", 42)
        except TypeError:
            pass
        else:
            self.fail("Carlo Verre __setattr__ succeeded!")
        try:
            object.__delattr__(str, "lower")
        except TypeError:
            pass
        else:
            self.fail("Carlo Verre __delattr__ succeeded!")
    def test_weakref_segfault(self):
        # Testing weakref segfault...
        # SF 742911
        # Provoker dereferences a weak reference to its owner from inside
        # __del__, while the owner is itself being torn down; this used to
        # segfault. The test passes by simply not crashing.
        import weakref
        class Provoker:
            def __init__(self, referrent):
                self.ref = weakref.ref(referrent)
            def __del__(self):
                x = self.ref()
        class Oops(object):
            pass
        o = Oops()
        o.whatever = Provoker(o)
        del o
def test_wrapper_segfault(self):
# SF 927248: deeply nested wrappers could cause stack overflow
f = lambda:None
for i in range(1000000):
f = f.__call__
f = None
    def test_file_fault(self):
        # Testing sys.stdout is changed in getattr...
        # StdoutGuard restores the real stdout and raises from its own
        # __getattr__, so print()'s attribute lookup on sys.stdout raises.
        test_stdout = sys.stdout
        class StdoutGuard:
            def __getattr__(self, attr):
                sys.stdout = sys.__stdout__
                raise RuntimeError("Premature access to sys.stdout.%s" % attr)
        sys.stdout = StdoutGuard()
        try:
            print("Oops!")
        except RuntimeError:
            pass
        finally:
            # Always restore the original stdout for subsequent tests.
            sys.stdout = test_stdout
    def test_vicious_descriptor_nonsense(self):
        # Testing vicious_descriptor_nonsense...
        # A potential segfault spotted by Thomas Wouters in mail to
        # python-dev 2003-04-17, turned into an example & fixed by Michael
        # Hudson just less than four months later...
        # Evil hashes/compares equal to the string 'attr', so looking up
        # c.attr probes the instance dict and triggers Evil.__eq__, which
        # deletes the class attribute mid-lookup.
        class Evil(object):
            def __hash__(self):
                return hash('attr')
            def __eq__(self, other):
                del C.attr
                return 0
        class Descr(object):
            def __get__(self, ob, type=None):
                return 1
        class C(object):
            attr = Descr()
        c = C()
        c.__dict__[Evil()] = 0
        self.assertEqual(c.attr, 1)
        # this makes a crash more likely:
        support.gc_collect()
        self.assertEqual(hasattr(c, 'attr'), False)
    def test_init(self):
        # SF 1155938
        # __init__ returning anything other than None must raise TypeError.
        class Foo(object):
            def __init__(self):
                return 10
        try:
            Foo()
        except TypeError:
            pass
        else:
            self.fail("did not test __init__() for None return")
    def test_method_wrapper(self):
        # Testing method-wrapper objects...
        # <type 'method-wrapper'> did not support any reflection before 2.5
        # XXX should methods really support __eq__?
        l = []
        # Wrappers over the same slot of equal objects compare equal.
        self.assertEqual(l.__add__, l.__add__)
        self.assertEqual(l.__add__, [].__add__)
        self.assertTrue(l.__add__ != [5].__add__)
        self.assertTrue(l.__add__ != l.__mul__)
        self.assertTrue(l.__add__.__name__ == '__add__')
        if hasattr(l.__add__, '__self__'):
            # CPython
            self.assertTrue(l.__add__.__self__ is l)
            self.assertTrue(l.__add__.__objclass__ is list)
        else:
            # Python implementations where [].__add__ is a normal bound method
            self.assertTrue(l.__add__.im_self is l)
            self.assertTrue(l.__add__.im_class is list)
        self.assertEqual(l.__add__.__doc__, list.__add__.__doc__)
        # A wrapper bound to an unhashable object is itself unhashable...
        try:
            hash(l.__add__)
        except TypeError:
            pass
        else:
            self.fail("no TypeError from hash([].__add__)")
        # ...but one bound to a hashable object (a tuple) is hashable.
        t = ()
        t += (7,)
        self.assertEqual(t.__add__, (7,).__add__)
        self.assertEqual(hash(t.__add__), hash((7,).__add__))
    def test_not_implemented(self):
        # Testing NotImplemented...
        # all binary methods should be able to return a NotImplemented
        import operator
        def specialmethod(self, other):
            return NotImplemented
        # check(expr, x, y): the expression must end in TypeError because
        # both sides returned (or lacked) NotImplemented.
        def check(expr, x, y):
            try:
                exec(expr, {'x': x, 'y': y, 'operator': operator})
            except TypeError:
                pass
            else:
                self.fail("no TypeError from %r" % (expr,))
        N1 = sys.maxsize + 1    # might trigger OverflowErrors instead of
                                # TypeErrors
        N2 = sys.maxsize       # if sizeof(int) < sizeof(long), might trigger
                               #   ValueErrors instead of TypeErrors
        for name, expr, iexpr in [
                ('__add__',      'x + y',                   'x += y'),
                ('__sub__',      'x - y',                   'x -= y'),
                ('__mul__',      'x * y',                   'x *= y'),
                ('__truediv__',  'operator.truediv(x, y)',  None),
                ('__floordiv__', 'operator.floordiv(x, y)', None),
                ('__div__',      'x / y',                   'x /= y'),
                ('__mod__',      'x % y',                   'x %= y'),
                ('__divmod__',   'divmod(x, y)',            None),
                ('__pow__',      'x ** y',                  'x **= y'),
                ('__lshift__',   'x << y',                  'x <<= y'),
                ('__rshift__',   'x >> y',                  'x >>= y'),
                ('__and__',      'x & y',                   'x &= y'),
                ('__or__',       'x | y',                   'x |= y'),
                ('__xor__',      'x ^ y',                   'x ^= y')]:
            rname = '__r' + name[2:]
            # A defines only the forward method, returning NotImplemented.
            A = type('A', (), {name: specialmethod})
            a = A()
            check(expr, a, a)
            check(expr, a, N1)
            check(expr, a, N2)
            if iexpr:
                check(iexpr, a, a)
                check(iexpr, a, N1)
                check(iexpr, a, N2)
                # C defines only the in-place method.
                iname = '__i' + name[2:]
                C = type('C', (), {iname: specialmethod})
                c = C()
                check(iexpr, c, a)
                check(iexpr, c, N1)
                check(iexpr, c, N2)
def test_assign_slice(self):
# ceval.c's assign_slice used to check for
# tp->tp_as_sequence->sq_slice instead of
# tp->tp_as_sequence->sq_ass_slice
class C(object):
def __setitem__(self, idx, value):
self.value = value
c = C()
c[1:2] = 3
self.assertEqual(c.value, 3)
    def test_set_and_no_get(self):
        # See
        # http://mail.python.org/pipermail/python-dev/2010-January/095637.html
        # A descriptor defining __set__ but not __get__: writes route through
        # the descriptor, while reads fall back to the instance dict.
        class Descr(object):
            def __init__(self, name):
                self.name = name

            def __set__(self, obj, value):
                obj.__dict__[self.name] = value

        descr = Descr("a")

        class X(object):
            a = descr

        x = X()
        self.assertIs(x.a, descr)    # no __get__: reading yields the descriptor itself
        x.a = 42                     # __set__ stores into the instance dict
        self.assertEqual(x.a, 42)    # instance dict entry now shadows the descriptor

        # Also check type_getattro for correctness.
        class Meta(type):
            pass

        class X(object):
            __metaclass__ = Meta  # NOTE(review): Py2 spelling, ignored in Py3 -- likely vestigial

        X.a = 42
        Meta.a = Descr("a")
        self.assertEqual(X.a, 42)
    def test_getattr_hooks(self):
        # issue 4230: descriptors installed as __getattribute__/__getattr__
        # hooks must be invoked through their __get__.
        class Descriptor(object):
            counter = 0

            def __get__(self, obj, objtype=None):
                def getter(name):
                    self.counter += 1
                    raise AttributeError(name)
                return getter

        descr = Descriptor()

        class A(object):
            __getattribute__ = descr

        class B(object):
            __getattr__ = descr

        class C(object):
            __getattribute__ = descr
            __getattr__ = descr

        # A: hook raises -> 1 invocation.  B: __getattr__ fallback -> 1.
        # C: __getattribute__ raises, then __getattr__ runs -> 2 more.
        self.assertRaises(AttributeError, getattr, A(), "attr")
        self.assertEqual(descr.counter, 1)
        self.assertRaises(AttributeError, getattr, B(), "attr")
        self.assertEqual(descr.counter, 2)
        self.assertRaises(AttributeError, getattr, C(), "attr")
        self.assertEqual(descr.counter, 4)

        class EvilGetattribute(object):
            # This used to segfault: the hook deletes the class's __getattr__
            # and forces collections before raising.
            def __getattr__(self, name):
                raise AttributeError(name)

            def __getattribute__(self, name):
                del EvilGetattribute.__getattr__
                for i in range(5):
                    gc.collect()
                raise AttributeError(name)

        self.assertRaises(AttributeError, getattr, EvilGetattribute(), "attr")
def test_type___getattribute__(self):
self.assertRaises(TypeError, type.__getattribute__, list, type)
    def test_abstractmethods(self):
        # type pretends not to have __abstractmethods__.
        self.assertRaises(AttributeError, getattr, type, "__abstractmethods__")

        class meta(type):
            pass
        # ...and so does a freshly created metaclass that never set it.
        self.assertRaises(AttributeError, getattr, meta, "__abstractmethods__")

        class X(object):
            pass
        # Deleting it from a class that never defined it must fail too.
        with self.assertRaises(AttributeError):
            del X.__abstractmethods__
    def test_proxy_call(self):
        # Lying about __class__ fools isinstance() but must not fool the
        # C-level descriptors, which check the real type of their argument.
        class FakeStr:
            __class__ = str

        fake_str = FakeStr()
        # isinstance() reads __class__
        self.assertTrue(isinstance(fake_str, str))

        # call a method descriptor
        with self.assertRaises(TypeError):
            str.split(fake_str)

        # call a slot wrapper descriptor
        with self.assertRaises(TypeError):
            str.__add__(fake_str, "abc")
    def test_repr_as_str(self):
        # Issue #11603: crash or infinite loop when rebinding __str__ as
        # __repr__.  object.__str__ delegates to __repr__, so this aliasing
        # creates unbounded recursion.
        class Foo:
            pass
        Foo.__repr__ = Foo.__str__
        foo = Foo()
        # The interpreter must detect the recursion and raise, not crash.
        self.assertRaises(RuntimeError, str, foo)
        self.assertRaises(RuntimeError, repr, foo)
def test_mixing_slot_wrappers(self):
class X(dict):
__setattr__ = dict.__setitem__
x = X()
x.y = 42
self.assertEqual(x["y"], 42)
def test_slot_shadows_class_variable(self):
with self.assertRaises(ValueError) as cm:
class X:
__slots__ = ["foo"]
foo = None
m = str(cm.exception)
self.assertEqual("'foo' in __slots__ conflicts with class variable", m)
    def test_set_doc(self):
        class X:
            "elephant"
        # __doc__ on a heap type is writable...
        X.__doc__ = "banana"
        self.assertEqual(X.__doc__, "banana")
        # ...but not on a static (C) type,
        with self.assertRaises(TypeError) as cm:
            type(list).__dict__["__doc__"].__set__(list, "blah")
        self.assertIn("can't set list.__doc__", str(cm.exception))
        # and the getset rejects deletion even on a heap type.
        with self.assertRaises(TypeError) as cm:
            type(X).__dict__["__doc__"].__delete__(X)
        self.assertIn("can't delete X.__doc__", str(cm.exception))
        self.assertEqual(X.__doc__, "banana")
    def test_qualname(self):
        descriptors = [str.lower, complex.real, float.real, int.__add__]
        types = ['method', 'member', 'getset', 'wrapper']

        # make sure we have an example of each type of descriptor
        for d, n in zip(descriptors, types):
            self.assertEqual(type(d).__name__, n + '_descriptor')

        # A descriptor's __qualname__ is "<owner qualname>.<name>".
        for d in descriptors:
            qualname = d.__objclass__.__qualname__ + '.' + d.__name__
            self.assertEqual(d.__qualname__, qualname)

        self.assertEqual(str.lower.__qualname__, 'str.lower')
        self.assertEqual(complex.real.__qualname__, 'complex.real')
        self.assertEqual(float.real.__qualname__, 'float.real')
        self.assertEqual(int.__add__.__qualname__, 'int.__add__')
def test_qualname_dict(self):
ns = {'__qualname__': 'some.name'}
tp = type('Foo', (), ns)
self.assertEqual(tp.__qualname__, 'some.name')
self.assertEqual(tp.__dict__['__qualname__'], 'some.name')
self.assertEqual(ns, {'__qualname__': 'some.name'})
ns = {'__qualname__': 1}
self.assertRaises(TypeError, type, 'Foo', (), ns)
    def test_cycle_through_dict(self):
        # See bug #1469629: an instance whose __dict__ is the instance
        # itself must still be garbage-collectable and not resurrect.
        class X(dict):
            def __init__(self):
                dict.__init__(self)
                self.__dict__ = self

        x = X()
        x.attr = 42
        wr = weakref.ref(x)
        del x
        support.gc_collect()
        # The weakref must be dead and no X instance may survive collection.
        self.assertIsNone(wr())
        for o in gc.get_objects():
            self.assertIsNot(type(o), X)
    def test_object_new_and_init_with_parameters(self):
        # See issue #1683368: object.__new__/__init__ reject excess
        # arguments unless the counterpart method is overridden.
        class OverrideNeither:
            pass
        self.assertRaises(TypeError, OverrideNeither, 1)
        self.assertRaises(TypeError, OverrideNeither, kw=1)

        class OverrideNew:
            def __new__(cls, foo, kw=0, *args, **kwds):
                return object.__new__(cls, *args, **kwds)

        class OverrideInit:
            def __init__(self, foo, kw=0, *args, **kwargs):
                return object.__init__(self, *args, **kwargs)

        class OverrideBoth(OverrideNew, OverrideInit):
            pass

        for case in OverrideNew, OverrideInit, OverrideBoth:
            case(1)
            case(1, kw=2)
            # Leftover args still reach object.__new__/__init__ and fail.
            self.assertRaises(TypeError, case, 1, 2, 3)
            self.assertRaises(TypeError, case, 1, 2, foo=3)
class DictProxyTests(unittest.TestCase):
    """Behaviour of the mappingproxy returned by SomeClass.__dict__."""

    def setUp(self):
        class C(object):
            def meth(self):
                pass
        self.C = C

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'trace function introduces __local__')
    def test_iter_keys(self):
        # Testing dict-proxy keys...
        # The keys view must be a lazy view object, not a list.
        it = self.C.__dict__.keys()
        self.assertNotIsInstance(it, list)
        keys = list(it)
        keys.sort()
        self.assertEqual(keys, ['__dict__', '__doc__', '__module__',
                                '__qualname__', '__weakref__', 'meth'])

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'trace function introduces __local__')
    def test_iter_values(self):
        # Testing dict-proxy values...
        it = self.C.__dict__.values()
        self.assertNotIsInstance(it, list)
        values = list(it)
        # One value per key listed in test_iter_keys.
        self.assertEqual(len(values), 6)

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'trace function introduces __local__')
    def test_iter_items(self):
        # Testing dict-proxy iteritems...
        it = self.C.__dict__.items()
        self.assertNotIsInstance(it, list)
        keys = [item[0] for item in it]
        keys.sort()
        self.assertEqual(keys, ['__dict__', '__doc__', '__module__',
                                '__qualname__', '__weakref__', 'meth'])

    def test_dict_type_with_metaclass(self):
        # Testing type of __dict__ when metaclass set...
        class B(object):
            pass
        class M(type):
            pass
        class C(metaclass=M):
            # In 2.3a1, C.__dict__ was a real dict rather than a dict proxy
            pass
        self.assertEqual(type(C.__dict__), type(B.__dict__))

    def test_repr(self):
        # Testing mappingproxy.__repr__.
        # We can't blindly compare with the repr of another dict as ordering
        # of keys and values is arbitrary and may differ.
        r = repr(self.C.__dict__)
        self.assertTrue(r.startswith('mappingproxy('), r)
        self.assertTrue(r.endswith(')'), r)
        for k, v in self.C.__dict__.items():
            self.assertIn('{!r}: {!r}'.format(k, v), r)
class PTypesLongInitTest(unittest.TestCase):
    # This is in its own TestCase so that it can be run before any other tests.
    def test_pytype_long_ready(self):
        # Testing SF bug 551412 ...
        # This dumps core when SF bug 551412 isn't fixed --
        # but only when test_descr.py is run separately.
        # (That can't be helped -- as soon as PyType_Ready()
        # is called for PyLong_Type, the bug is gone.)
        class UserLong(object):
            def __pow__(self, *args):
                pass
        try:
            pow(0, UserLong(), 0)
        except:
            # Bare except is deliberate: any outcome short of an interpreter
            # crash counts as a pass -- the bug under test was a segfault.
            pass

        # Another segfault only when run early
        # (before PyType_Ready(tuple) is called)
        type.mro(tuple)
class MiscTests(unittest.TestCase):
    def test_type_lookup_mro_reference(self):
        # Issue #14199: _PyType_Lookup() has to keep a strong reference to
        # the type MRO because it may be modified during the lookup, if
        # __bases__ is set during the lookup for example.
        class MyKey(object):
            def __hash__(self):
                return hash('mykey')

            def __eq__(self, other):
                # Key comparison during the namespace-dict lookup rebases X
                # as a side effect -- exactly the mid-lookup mutation the
                # fix guards against.
                X.__bases__ = (Base2,)

        class Base(object):
            mykey = 'from Base'
            mykey2 = 'from Base'

        class Base2(object):
            mykey = 'from Base2'
            mykey2 = 'from Base2'

        # The MyKey() dict key forces __eq__ to run during attribute lookup.
        X = type('X', (Base,), {MyKey(): 5})
        # mykey is read from Base
        self.assertEqual(X.mykey, 'from Base')
        # mykey2 is read from Base2 because MyKey.__eq__ has set __bases__
        self.assertEqual(X.mykey2, 'from Base2')
def test_main():
    # Run all local test cases; PTypesLongInitTest goes first because its
    # regression only reproduces before other tests ready PyLong_Type.
    cases = (PTypesLongInitTest, OperatorsTest, ClassPropertiesAndMethods,
             DictProxyTests, MiscTests)
    support.run_unittest(*cases)
# Allow running this test file directly.
if __name__ == "__main__":
    test_main()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_descr.py | Python | mit | 159,901 |
## Copyright 2011, IOActive, Inc. All rights reserved.
##
## AndBug is free software: you can redistribute it and/or modify it under
## the terms of version 3 of the GNU Lesser General Public License as
## published by the Free Software Foundation.
##
## AndBug is distributed in the hope that it will be useful, but WITHOUT ANY
## WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
## FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
## more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with AndBug. If not, see <http://www.gnu.org/licenses/>.
import os, sys, time
from cStringIO import StringIO
def blocks(seq, sz):
    """Yield consecutive slices of *seq*, each at most *sz* items long."""
    pos, end = 0, len(seq)
    while pos < end:
        nxt = pos + sz
        yield seq[pos:nxt]
        pos = nxt
def censor(seq):
    """Yield *seq*'s characters, replacing non-printable-ASCII ones with '.'.

    Printable here means the range '!'..'~' inclusive; space and control
    characters are censored, matching classic hex-dump ASCII columns.
    """
    for ch in seq:
        yield ch if '!' <= ch <= '~' else '.'
def format_hex(data, indent="", width=16, out=None):
    """Render *data* as a hex dump: per-row offset, hex bytes, ASCII column.

    If *out* is None the dump is returned as a string; otherwise rows are
    written to *out* and None is returned.  Python 2 only: relies on
    str.encode('hex').
    """
    if out == None:
        out = StringIO()
        strout = True
    else:
        strout = False

    # Fold the caller's indent into the per-row offset prefix.
    indent += "%08x: "
    ofs = 0
    for block in blocks(data, width):
        out.write(indent % ofs)
        out.write(' '.join(map(lambda x: x.encode('hex'), block)))
        if len(block) < width:
            # NOTE(review): pads one space per missing byte, but each hex
            # byte occupies three columns -- ASCII alignment is approximate.
            out.write( ' ' * (width - len(block)) )
        out.write(' ')
        out.write(''.join(censor(block)))
        out.write(os.linesep)
        ofs += len(block)

    if strout:
        return out.getvalue()
def parse_hex(dump, out=None):
    """Inverse of format_hex: decode a hex dump back into raw byte strings.

    If *out* is None the decoded data is returned as a string; otherwise it
    is written to *out* and None is returned.
    """
    if out == None:
        out = StringIO()
        strout = True
    else:
        strout = False

    for row in dump.splitlines():
        row = row.strip().split(' ')
        # NOTE(review): row[1] assumes the offset prefix and the hex run are
        # split apart by this delimiter; checked against format_hex's actual
        # output this looks like it selects the wrong column -- verify that
        # parse_hex(format_hex(x)) round-trips before relying on it.
        block = row[1].strip().split(' ')
        block = ''.join(map(lambda x: chr(int(x, 16)), block))
        out.write(block)

    if strout:
        return out.getvalue()
class LogEvent(object):
    """A single log record: timestamp, tag, metadata string and raw payload."""

    def __init__(self, time, tag, meta, data):
        self.time = time
        self.tag = tag
        self.meta = meta
        self.data = data or ''  # normalize None to empty payload

    def __str__(self):
        # Header line "tag time meta", followed by an indented hex dump of
        # the payload (empty string when there is no data).
        return "%s %s %s\n%s" % (
            self.tag, self.time, self.meta,
            format_hex(self.data, indent=" ")
        )
class LogWriter(object):
    """Writes LogEvents (or anything str()-able) to a text stream."""

    def __init__(self, file=sys.stdout):
        self.file = file

    def writeEvent(self, evt):
        # Formatting is delegated entirely to the event's __str__.
        self.file.write(str(evt))
class LogReader(object):
    """Incrementally parses LogEvents from a text stream.

    Keeps a one-line pushback buffer so readEvent() can stop at the first
    line that does not belong to the current event.
    """

    def __init__(self, file=sys.stdin):
        self.file = file
        self.last = None  # pushed-back line, consumed before the stream

    def readLine(self):
        """Return the next line (pushback first), right-stripped."""
        if self.last is None:
            line = self.file.readline().rstrip()
        else:
            line = self.last
            self.last = None
        return line

    def pushLine(self, line):
        """Make *line* the next line returned by readLine()."""
        self.last = line

    def readEvent(self):
        """Read and return the next LogEvent, or None at end of stream."""
        line = self.readLine()
        if not line: return None
        if line[0] == ' ':
            # Stray continuation line with no header: skip and try again.
            return self.readEvent() # Again..

        # BUG FIX: the header is "tag time meta" where meta may itself
        # contain spaces, so maxsplit must be 2 (three fields).  The old
        # split(' ', 3) produced four fields for any meta with a space in
        # it and the 3-name unpack raised ValueError.
        tag, time, meta = line.split(' ', 2)
        time = int(time)
        data = []

        # Collect the indented hex-dump lines that belong to this event.
        while True:
            line = self.readLine()
            if line.startswith( ' ' ):
                data.append(line)
            else:
                self.pushLine(line)
                break

        if data:
            data = parse_hex('\n'.join(data))
        else:
            data = ''

        return LogEvent(time, tag, meta, data)
# Module-level writers: error() logs to stderr, info() logs to stdout.
stderr = LogWriter(sys.stderr)
stdout = LogWriter(sys.stdout)
def error(tag, meta, data=None):
    """Emit a LogEvent to the stderr writer, stamped with the current time."""
    stderr.writeEvent(LogEvent(int(time.time()), tag, meta, data))
def info(tag, meta, data=None):
    """Emit a LogEvent to the stdout writer, stamped with the current time."""
    stdout.writeEvent(LogEvent(int(time.time()), tag, meta, data))
def read_log(path=None, file=None):
    """Return a LogReader for *path*, *file*, or standard input.

    Bug fix: previously a non-None *path* fell through every branch and the
    function returned None; the file at *path* is now opened for reading.
    """
    if path is not None:
        return LogReader(open(path, 'r'))
    if file is not None:
        return LogReader(file)
    return LogReader(sys.stdin)
| eight-pack-abdominals/andbug | lib/andbug/log.py | Python | gpl-3.0 | 3,953 |
"""Tests for the microsoft_face component."""
| fbradyirl/home-assistant | tests/components/microsoft_face/__init__.py | Python | apache-2.0 | 46 |
import array
import math
from collections import defaultdict
class Graph(object):
    """Directed graph stored as per-node incoming-edge lists plus out-degrees.

    emptyNodes is the caller-maintained list of dangling (no-out-edge)
    node indices used by pagerank().
    """

    def __init__(self, size):
        self.nNode = size
        self.outDegree = [0 for _ in range(size)]
        self.inEdges = [[] for _ in range(size)]
        self.emptyNodes = []
def pagerank(graph, d, eps):
    """Power-iteration PageRank over *graph*.

    *d* is the damping factor, *eps* the L2 convergence threshold.  Returns
    a list of one score per node.  NOTE(review): scores use the scaled form
    (ranks start at 1.0 and update as (1-d) + d*w), not the
    probability-distribution form that sums to 1 -- confirm callers expect
    this scaling.
    """
    glen = graph.nNode
    # Double buffers: current/next rank and per-node out-edge weight
    # (rank divided by out-degree).
    pg1, pg2 = [0.0]*glen, [0.0]*glen
    weight1, weight2 = [0.0]*glen, [0.0]*glen

    # initialize pagerank
    for i in range(glen):
        pg1[i] = 1.0
        if graph.outDegree[i] != 0:
            weight1[i] = 1.0 / graph.outDegree[i]

    # power iteration
    while True:
        # Dangling-node mass: rank held by nodes in emptyNodes is spread
        # evenly over all nodes this iteration.
        totalE = 0.0
        for idx in graph.emptyNodes:
            totalE += pg1[idx]
        totalE /= glen

        diff = 0.0
        for i in range(glen):
            w = totalE
            for idx in graph.inEdges[i]:
                w += weight1[idx]
            pg2[i] = (1.0 - d) + d*w
            if graph.outDegree[i] != 0:
                weight2[i] = pg2[i] / graph.outDegree[i]
            diff += pow(pg1[i]-pg2[i], 2)

        # Stop when the L2 distance between iterations drops below eps;
        # pg2 holds the freshest values at that point, so return it.
        if math.sqrt(diff) < eps:
            break
        pg1, pg2 = pg2, pg1
        weight1, weight2 = weight2, weight1

    return pg2
| shaform/pagerank | python3/pagerank.py | Python | mit | 1,170 |
import csv
import sys
import logging
import os
import uuid
import psycopg2
import psycopg2.extras
if __name__ == '__main__':
    # Usage: importar.py <input.csv>
    # Reads ';'-delimited rows, creates a user plus a username/password
    # credential per row, and writes a per-row status report file.
    arch = sys.argv[1]
    salida = '{}_proceso.csv'.format(arch.replace('.','_'))

    # Database connection settings come from the environment.
    host = os.environ['HOST']
    user = os.environ['USER']
    passwd = os.environ['PASS']
    base = os.environ['BASE']
    con = psycopg2.connect(host=host, user=user, password=passwd, database=base)
    try:
        cur = con.cursor()
        try:
            with open(arch, 'r') as f, open(salida,'w') as f2:
                ingreso = csv.reader(f, delimiter=';')
                for fila in ingreso:
                    # assumes column 1 is "LASTNAME, FIRSTNAME" -- TODO confirm;
                    # a row without a comma raises IndexError here.
                    apellido = fila[1].split(',')[0]
                    nombre = fila[1].split(',')[1]
                    dni = fila[2]
                    # Strip document-type prefixes (pas/dni/ci/dnt) from the id.
                    d = dni.lower().replace('pas','').replace('dni','').replace('ci','').replace('dnt','').strip()
                    if len(d) > 7:
                        # Longer identifiers are upper-cased -- presumably
                        # passport numbers; verify against the data source.
                        d = d.upper()
                    n = nombre.strip()
                    a = apellido.strip()
                    uid = str(uuid.uuid4())
                    # Skip rows whose document id already exists.
                    cur.execute('select 1 from profile.users where dni = %s', (d,))
                    if cur.rowcount > 0:
                        ss = '{};{};{};ya existe\n'.format(d, n, a)
                        f2.write(ss)
                        print(ss)
                        continue
                    cur.execute('insert into profile.users (id, dni, name, lastname) values (%s,%s,%s,%s)', (uid, d, n, a))
                    pid = str(uuid.uuid4())
                    pp = str(uuid.uuid4())  # random initial password value
                    cur.execute('insert into credentials.user_password (id, user_id, username, password) values (%s,%s,%s,%s)', (pid, uid, d, pp))
                    # Commit per row so earlier rows survive a later failure.
                    con.commit()
                    ss = '{};{};{};creado\n'.format(d, n, a)
                    f2.write(ss)
                    print(ss)
        finally:
            cur.close()
    finally:
        con.close()
#
# statusbar.py
#
# Copyright (C) 2007, 2008 Andrew Resch <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
import gtk
import gobject
import logging
from deluge.ui.client import client
import deluge.component as component
import deluge.common
import common
from deluge.configmanager import ConfigManager
log = logging.getLogger(__name__)
class StatusBarItem:
    """One status-bar entry: an icon plus a label inside a clickable EventBox.

    (Python 2 / PyGTK.)  All constructor arguments are optional; anything
    supplied is applied immediately and the widgets are shown.
    """

    def __init__(self, image=None, stock=None, text=None, callback=None, tooltip=None):
        self._widgets = []
        self._ebox = gtk.EventBox()
        self._hbox = gtk.HBox()
        self._hbox.set_spacing(5)
        self._image = gtk.Image()
        self._label = gtk.Label()
        self._hbox.add(self._image)
        self._hbox.add(self._label)
        self._ebox.add(self._hbox)

        # Add image from file or stock
        if image != None or stock != None:
            if image != None:
                self.set_image_from_file(image)
            if stock != None:
                self.set_image_from_stock(stock)

        # Add text
        if text != None:
            self.set_text(text)

        if callback != None:
            self.set_callback(callback)

        if tooltip:
            self.set_tooltip(tooltip)

        self.show_all()

    def set_callback(self, callback):
        # Button presses anywhere on the item are forwarded to the callback.
        self._ebox.connect("button-press-event", callback)

    def show_all(self):
        self._ebox.show()
        self._hbox.show()
        self._image.show()
        self._label.show()

    def set_image_from_file(self, image):
        self._image.set_from_file(image)

    def set_image_from_stock(self, stock):
        self._image.set_from_stock(stock, gtk.ICON_SIZE_MENU)

    def set_text(self, text):
        # Skip the GTK call when the text is unchanged to avoid relayout.
        if self._label.get_text() != text:
            self._label.set_text(text)

    def set_tooltip(self, tip):
        if self._ebox.get_tooltip_text() != tip:
            self._ebox.set_tooltip_text(tip)

    def get_widgets(self):
        return self._widgets

    def get_eventbox(self):
        return self._ebox

    def get_text(self):
        return self._label.get_text()
class StatusBar(component.Component):
    """Deluge main-window status bar (Python 2 / PyGTK).

    Polls the daemon on the component interval (3s) and displays connection
    count, transfer rates, protocol overhead, DHT node count, free disk
    space and transient warnings.  start()/stop() switch the bar between
    connected and not-connected layouts.
    """

    def __init__(self):
        component.Component.__init__(self, "StatusBar", interval=3)
        self.window = component.get("MainWindow")
        self.statusbar = self.window.get_builder().get_object("statusbar")
        self.config = ConfigManager("gtkui.conf")

        # Status variables that are updated via callback
        self.max_connections = -1
        self.num_connections = 0
        self.max_download_speed = -1.0
        self.download_rate = ""
        self.max_upload_speed = -1.0
        self.upload_rate = ""
        self.dht_nodes = 0
        self.dht_status = False
        self.health = False
        self.download_protocol_rate = 0.0
        self.upload_protocol_rate = 0.0

        # Maps a core config key to the local handler invoked on change.
        self.config_value_changed_dict = {
            "max_connections_global": self._on_max_connections_global,
            "max_download_speed": self._on_max_download_speed,
            "max_upload_speed": self._on_max_upload_speed,
            "dht": self._on_dht
        }
        self.current_warnings = []
        # Add a HBox to the statusbar after removing the initial label widget
        self.hbox = gtk.HBox()
        self.hbox.set_spacing(10)
        frame = self.statusbar.get_children()[0]
        frame.remove(frame.get_children()[0])
        frame.add(self.hbox)
        self.statusbar.show_all()
        # Create the not connected item
        self.not_connected_item = StatusBarItem(
            stock=gtk.STOCK_STOP, text=_("Not Connected"),
            callback=self._on_notconnected_item_clicked)
        # Show the not connected status bar
        self.show_not_connected()

        # Hide if necessary
        self.visible(self.config["show_statusbar"])

        client.register_event_handler("ConfigValueChangedEvent", self.on_configvaluechanged_event)

    def start(self):
        """Build the connected layout and request initial daemon state."""
        # Add in images and labels
        self.remove_item(self.not_connected_item)

        self.connections_item = self.add_item(
            stock=gtk.STOCK_NETWORK,
            callback=self._on_connection_item_clicked,
            tooltip=_("Connections"))

        self.download_item = self.add_item(
            image=deluge.common.get_pixmap("downloading16.png"),
            callback=self._on_download_item_clicked,
            tooltip=_("Download Speed"))

        self.upload_item = self.add_item(
            image=deluge.common.get_pixmap("seeding16.png"),
            callback=self._on_upload_item_clicked,
            tooltip=_("Upload Speed"))

        self.traffic_item = self.add_item(
            image=deluge.common.get_pixmap("traffic16.png"),
            callback=self._on_traffic_item_clicked,
            tooltip=_("Protocol Traffic Download/Upload"))

        # Not packed yet: only shown when the daemon reports DHT enabled.
        self.dht_item = StatusBarItem(
            image=deluge.common.get_pixmap("dht16.png"), tooltip=_("DHT Nodes"))

        self.diskspace_item = self.add_item(
            stock=gtk.STOCK_HARDDISK,
            callback=self._on_diskspace_item_clicked,
            tooltip=_("Free Disk Space"))

        # Removed again as soon as an incoming connection is observed.
        self.health_item = self.add_item(
            stock=gtk.STOCK_DIALOG_ERROR,
            text=_("No Incoming Connections!"),
            callback=self._on_health_icon_clicked)

        self.health = False

        # Get some config values
        client.core.get_config_value(
            "max_connections_global").addCallback(self._on_max_connections_global)
        client.core.get_config_value(
            "max_download_speed").addCallback(self._on_max_download_speed)
        client.core.get_config_value(
            "max_upload_speed").addCallback(self._on_max_upload_speed)
        client.core.get_config_value("dht").addCallback(self._on_dht)

        self.send_status_request()

    def stop(self):
        """Tear down the connected layout and show the not-connected item."""
        # When stopped, we just show the not connected thingy
        try:
            self.remove_item(self.connections_item)
            self.remove_item(self.dht_item)
            self.remove_item(self.download_item)
            self.remove_item(self.upload_item)
            self.remove_item(self.not_connected_item)
            self.remove_item(self.health_item)
            self.remove_item(self.traffic_item)
            self.remove_item(self.diskspace_item)
        except Exception, e:
            log.debug("Unable to remove StatusBar item: %s", e)
        self.show_not_connected()

    def visible(self, visible):
        """Show or hide the whole bar and persist the choice to config."""
        if visible:
            self.statusbar.show()
        else:
            self.statusbar.hide()

        self.config["show_statusbar"] = visible

    def show_not_connected(self):
        self.hbox.pack_start(
            self.not_connected_item.get_eventbox(), expand=False, fill=False)

    def add_item(self, image=None, stock=None, text=None, callback=None, tooltip=None):
        """Adds an item to the status bar"""
        # The return tuple.. we return whatever widgets we add
        item = StatusBarItem(image, stock, text, callback, tooltip)
        self.hbox.pack_start(item.get_eventbox(), expand=False, fill=False)
        return item

    def remove_item(self, item):
        """Removes an item from the statusbar"""
        if item.get_eventbox() in self.hbox.get_children():
            try:
                self.hbox.remove(item.get_eventbox())
            except Exception, e:
                log.debug("Unable to remove widget: %s", e)

    def add_timeout_item(self, seconds=3, image=None, stock=None, text=None, callback=None):
        """Adds an item to the StatusBar for seconds"""
        item = self.add_item(image, stock, text, callback)
        # Start a timer to remove this item in seconds
        gobject.timeout_add(seconds * 1000, self.remove_item, item)

    def display_warning(self, text, callback=None):
        """Displays a warning to the user in the status bar"""
        # Deduplicated by text; auto-removed after 3 seconds.
        if text not in self.current_warnings:
            item = self.add_item(
                stock=gtk.STOCK_DIALOG_WARNING, text=text, callback=callback)
            self.current_warnings.append(text)
            gobject.timeout_add(3000, self.remove_warning, item)

    def remove_warning(self, item):
        self.current_warnings.remove(item.get_text())
        self.remove_item(item)

    def clear_statusbar(self):
        def remove(child):
            self.hbox.remove(child)
        self.hbox.foreach(remove)

    def send_status_request(self):
        # Sends an async request for data from the core
        client.core.get_num_connections().addCallback(self._on_get_num_connections)

        keys = ["upload_rate", "download_rate", "payload_upload_rate", "payload_download_rate"]

        if self.dht_status:
            keys.append("dht_nodes")

        if not self.health:
            keys.append("has_incoming_connections")

        client.core.get_session_status(keys).addCallback(self._on_get_session_status)
        client.core.get_free_space().addCallback(self._on_get_free_space)

    def on_configvaluechanged_event(self, key, value):
        """
        This is called when we receive a ConfigValueChangedEvent from
        the core.
        """
        if key in self.config_value_changed_dict.keys():
            self.config_value_changed_dict[key](value)

    def _on_max_connections_global(self, max_connections):
        self.max_connections = max_connections
        self.update_connections_label()

    def _on_get_num_connections(self, num_connections):
        self.num_connections = num_connections
        self.update_connections_label()

    def _on_dht(self, value):
        # Show/hide the DHT item and start polling dht_nodes accordingly.
        self.dht_status = value
        if value:
            self.hbox.pack_start(
                self.dht_item.get_eventbox(), expand=False, fill=False)
            self.send_status_request()
        else:
            self.remove_item(self.dht_item)

    def _on_get_session_status(self, status):
        self.download_rate = deluge.common.fspeed(status["payload_download_rate"])
        self.upload_rate = deluge.common.fspeed(status["payload_upload_rate"])
        # Protocol overhead = total rate minus payload rate, shown in KiB/s.
        self.download_protocol_rate = (status["download_rate"] - status["payload_download_rate"]) / 1024
        self.upload_protocol_rate = (status["upload_rate"] - status["payload_upload_rate"]) / 1024
        self.update_download_label()
        self.update_upload_label()
        self.update_traffic_label()

        if "dht_nodes" in status:
            self.dht_nodes = status["dht_nodes"]
            self.update_dht_label()

        if "has_incoming_connections" in status:
            self.health = status["has_incoming_connections"]
            if self.health:
                # Incoming connections observed: clear the warning icon.
                self.remove_item(self.health_item)

    def _on_get_free_space(self, space):
        self.diskspace_item.set_text(deluge.common.fsize(space))

    def _on_max_download_speed(self, max_download_speed):
        self.max_download_speed = max_download_speed
        self.update_download_label()

    def _on_max_upload_speed(self, max_upload_speed):
        self.max_upload_speed = max_upload_speed
        self.update_upload_label()

    def update_connections_label(self):
        # Set the max connections label
        if self.max_connections < 0:
            label_string = "%s" % self.num_connections
        else:
            label_string = "%s (%s)" % (self.num_connections, self.max_connections)

        self.connections_item.set_text(label_string)

    def update_dht_label(self):
        # Set the max connections label
        self.dht_item.set_text("%s" % (self.dht_nodes))

    def update_download_label(self):
        # Set the download speed label
        if self.max_download_speed <= 0:
            label_string = self.download_rate
        else:
            label_string = "%s (%s %s)" % (
                self.download_rate, self.max_download_speed, _("KiB/s"))

        self.download_item.set_text(label_string)

    def update_upload_label(self):
        # Set the upload speed label
        if self.max_upload_speed <= 0:
            label_string = self.upload_rate
        else:
            label_string = "%s (%s %s)" % (
                self.upload_rate, self.max_upload_speed, _("KiB/s"))

        self.upload_item.set_text(label_string)

    def update_traffic_label(self):
        label_string = "%.2f/%.2f %s" % (self.download_protocol_rate, self.upload_protocol_rate, _("KiB/s"))
        self.traffic_item.set_text(label_string)

    def update(self):
        # Send status request
        self.send_status_request()

    def _on_download_item_clicked(self, widget, event):
        menu = common.build_menu_radio_list(
            self.config["tray_download_speed_list"],
            self._on_set_download_speed,
            self.max_download_speed,
            _("KiB/s"), show_notset=True, show_other=True)
        menu.show_all()
        menu.popup(None, None, None, event.button, event.time)

    def _on_set_download_speed(self, widget):
        log.debug("_on_set_download_speed")

        if widget.get_name() == _("Unlimited"):
            value = -1
        elif widget.get_name() == _("Other..."):
            value = common.show_other_dialog(
                _("Set Maximum Download Speed"), _("KiB/s"), None, "downloading.svg", self.max_download_speed)
            if value == None:
                return
        else:
            value = float(widget.get_children()[0].get_text().split(" ")[0])

        log.debug("value: %s", value)

        # Set the config in the core
        if value != self.max_download_speed:
            client.core.set_config({"max_download_speed": value})

    def _on_upload_item_clicked(self, widget, event):
        menu = common.build_menu_radio_list(
            self.config["tray_upload_speed_list"],
            self._on_set_upload_speed,
            self.max_upload_speed,
            _("KiB/s"), show_notset=True, show_other=True)
        menu.show_all()
        menu.popup(None, None, None, event.button, event.time)

    def _on_set_upload_speed(self, widget):
        log.debug("_on_set_upload_speed")

        if widget.get_name() == _("Unlimited"):
            value = -1
        elif widget.get_name() == _("Other..."):
            value = common.show_other_dialog(
                _("Set Maximum Upload Speed"), _("KiB/s"), None, "seeding.svg", self.max_upload_speed)
            if value == None:
                return
        else:
            value = float(widget.get_children()[0].get_text().split(" ")[0])

        log.debug("value: %s", value)

        # Set the config in the core
        if value != self.max_upload_speed:
            client.core.set_config({"max_upload_speed": value})

    def _on_connection_item_clicked(self, widget, event):
        menu = common.build_menu_radio_list(
            self.config["connection_limit_list"],
            self._on_set_connection_limit,
            self.max_connections, show_notset=True, show_other=True)
        menu.show_all()
        menu.popup(None, None, None, event.button, event.time)

    def _on_set_connection_limit(self, widget):
        log.debug("_on_set_connection_limit")

        if widget.get_name() == _("Unlimited"):
            value = -1
        elif widget.get_name() == _("Other..."):
            value = common.show_other_dialog(
                _("Set Maximum Connections"), "", gtk.STOCK_NETWORK, None, self.max_connections)
            if value == None:
                return
        else:
            value = int(widget.get_children()[0].get_text().split(" ")[0])

        log.debug("value: %s", value)

        # Set the config in the core
        if value != self.max_connections:
            client.core.set_config({"max_connections_global": value})

    def _on_health_icon_clicked(self, widget, event):
        component.get("Preferences").show("Network")

    def _on_notconnected_item_clicked(self, widget, event):
        component.get("ConnectionManager").show()

    def _on_traffic_item_clicked(self, widget, event):
        component.get("Preferences").show("Network")

    def _on_diskspace_item_clicked(self, widget, event):
        component.get("Preferences").show("Downloads")
| voltaicsca/deluge | deluge/ui/gtkui/statusbar.py | Python | gpl-3.0 | 17,379 |
import numpy
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class MeanAbsoluteError(function_node.FunctionNode):

    """Mean absolute error function."""

    def check_type_forward(self, in_types):
        # Both inputs must be floating-point arrays of identical dtype
        # and shape.
        type_check._argname(in_types, ('x0', 'x1'))
        type_check.expect(
            in_types[0].dtype.kind == 'f',
            in_types[0].dtype == in_types[1].dtype,
            in_types[0].shape == in_types[1].shape
        )

    def forward_cpu(self, inputs):
        # Keep the elementwise difference for the backward pass.
        x0, x1 = inputs
        self.diff = x0 - x1
        diff = self.diff.ravel()
        return numpy.array(abs(diff).sum() / diff.size, dtype=diff.dtype),

    def forward_gpu(self, inputs):
        # Same as forward_cpu, but divides by a scalar of the array dtype
        # so the computation stays on the device.
        x0, x1 = inputs
        self.diff = x0 - x1
        diff = self.diff.ravel()
        return abs(diff).sum() / diff.dtype.type(diff.size),

    def backward(self, indexes, grad_outputs):
        # d/dx0 mean(|x0 - x1|) = sign(x0 - x1) / n; x1 gets the negation.
        gy, = grad_outputs
        coeff = gy * gy.data.dtype.type(1. / self.diff.size)
        coeff = chainer.functions.broadcast_to(coeff, self.diff.shape)
        gx0 = coeff * backend.get_array_module(gy.data).sign(self.diff)
        return gx0, -gx0
def mean_absolute_error(x0, x1):
    """Mean absolute error function.

    The function computes the mean absolute error between two variables. The
    mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the
    same dimensions. This function first calculates the absolute value
    differences between the corresponding elements in x0 and x1, and then
    returns the mean of those differences.

    Args:
        x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
        x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.

    Returns:
        ~chainer.Variable:
            A variable holding an array representing the mean absolute
            error of two inputs.

    .. admonition:: Example

        1D array examples:

        >>> x = np.array([1, 2, 3]).astype(np.float32)
        >>> y = np.array([0, 0, 0]).astype(np.float32)
        >>> F.mean_absolute_error(x, y)
        variable(2.)
        >>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
        >>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)
        >>> F.mean_absolute_error(x, y)
        variable(6.)

        2D array example:

        In this example, there are 4 elements, and thus 4 errors
        >>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)
        >>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)
        >>> F.mean_absolute_error(x, y)
        variable(5.5)

        3D array example:

        In this example, there are 8 elements, and thus 8 errors
        >>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))
        >>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))
        >>> x = x.astype(np.float32)
        >>> y = y.astype(np.float32)
        >>> F.mean_absolute_error(x, y)
        variable(3.5)

    """
    # apply() returns a tuple of outputs; MAE produces exactly one.
    return MeanAbsoluteError().apply((x0, x1))[0]
| tkerola/chainer | chainer/functions/loss/mean_absolute_error.py | Python | mit | 3,041 |
import io
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfpage import PDFPage
def extract_text_from_pdf(pdf_path):
    """Extract and return all text from the PDF at *pdf_path*.

    Returns None when the document yields no text.

    Bug fix: the parameter list previously contained a bare filesystem path
    literal (``def extract_text_from_pdf(/Users/...)``) -- a SyntaxError that
    prevented the module from importing.  The path is now a real parameter
    used by ``open()``.
    """
    resource_manager = PDFResourceManager()
    fake_file_handle = io.StringIO()
    converter = TextConverter(resource_manager, fake_file_handle)
    page_interpreter = PDFPageInterpreter(resource_manager, converter)

    with open(pdf_path, 'rb') as fh:
        for page in PDFPage.get_pages(fh,
                                      caching=True,
                                      check_extractable=True):
            page_interpreter.process_page(page)

        text = fake_file_handle.getvalue()

    # close open handles
    converter.close()
    fake_file_handle.close()

    if text:
        return text
if __name__ == '__main__':
    # Demo entry point; expects landrover.pdf in the working directory.
    print(extract_text_from_pdf('landrover.pdf'))
from alabtools.utils import Genome, Index
import h5py
import os
def test_genome_io():
    """Round-trip a Genome through an HDF5 file and require equality."""
    original = Genome('hg38')
    with h5py.File('./teststr.h5', 'w') as handle:
        original.save(handle)
    with h5py.File('./teststr.h5', 'r') as handle:
        restored = Genome(handle)
    os.remove('./teststr.h5')
    assert original == restored
def test_index_io():
    """Round-trip an Index through HDF5, both without and with a genome."""
    with open('testindex5.txt', 'w') as txt:
        txt.write('#comment line\n')
        txt.write('chr1 0 1000 gap 0\n')
        txt.write('chr3 0 1000 domain 2\n')
    for genome in (None, 'hg38'):
        if genome is None:
            # load without genome
            original = Index('testindex5.txt')
        else:
            # load with genome
            original = Index('testindex5.txt', genome=genome)
        with h5py.File('./teststr.h5', 'w') as handle:
            original.save(handle)
        with h5py.File('./teststr.h5', 'r') as handle:
            restored = Index(handle)
        assert restored == original
    os.remove('./teststr.h5')
    os.remove('./testindex5.txt')
| alberlab/alabtools | test/test_utils.py | Python | gpl-3.0 | 974 |
# Create your views here.
from django.contrib.auth.models import Group
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
import os
from querystring_parser import parser
import simplejson
from simplejson import dumps
from social.backends.google import GooglePlusAuth
from madrona.features import get_feature_by_uid
import settings
from .models import *
from data_manager.models import *
from mp_settings.models import *
def show_planner(request, project=None, template='planner.html'):
try:
socket_url = settings.SOCKET_URL
except AttributeError:
socket_url = ''
try:
if project:
mp_settings = MarinePlannerSettings.objects.get(slug_name=project)
else:
mp_settings = MarinePlannerSettings.objects.get(active=True)
project_name = mp_settings.project_name
latitude = mp_settings.latitude
longitude = mp_settings.longitude
zoom = mp_settings.zoom
default_hash = mp_settings.default_hash
min_zoom = mp_settings.min_zoom
max_zoom = mp_settings.max_zoom
project_logo = mp_settings.project_logo
try:
if project_logo:
url_validator = URLValidator()
url_validator(project_logo)
except ValidationError, e:
project_logo = os.path.join(settings.MEDIA_URL, project_logo)
project_icon = mp_settings.project_icon
try:
url_validator = URLValidator()
url_validator(project_icon)
except ValidationError, e:
project_icon = os.path.join(settings.MEDIA_URL, project_icon)
project_home_page = mp_settings.project_home_page
enable_drawing = mp_settings.enable_drawing
bitly_registered_domain = mp_settings.bitly_registered_domain
bitly_username = mp_settings.bitly_username
bitly_api_key = mp_settings.bitly_api_key
except:
project_name = project_logo = project_icon = project_home_page = bitly_registered_domain = bitly_username = bitly_api_key = default_hash = ""
latitude = longitude = zoom = min_zoom = max_zoom = None
enable_drawing = False
context = {
'MEDIA_URL': settings.MEDIA_URL, 'SOCKET_URL': socket_url, 'login': 'true',
'project_name': project_name, 'latitude': latitude, 'longitude': longitude, 'zoom': zoom,
'default_hash': default_hash, 'min_zoom': min_zoom, 'max_zoom': max_zoom,
'project_logo': project_logo, 'project_icon': project_icon, 'project_home_page': project_home_page,
'enable_drawing': enable_drawing,
'bitly_registered_domain': bitly_registered_domain, 'bitly_username': bitly_username, 'bitly_api_key': bitly_api_key
}
if request.user.is_authenticated:
context['session'] = request.session._session_key
if request.user.is_authenticated() and request.user.social_auth.all().count() > 0:
context['picture'] = request.user.social_auth.all()[0].extra_data.get('picture')
if settings.SOCIAL_AUTH_GOOGLE_PLUS_KEY:
context['plus_scope'] = ' '.join(GooglePlusAuth.DEFAULT_SCOPE)
context['plus_id'] = settings.SOCIAL_AUTH_GOOGLE_PLUS_KEY
if settings.UNDER_MAINTENANCE_TEMPLATE:
return render_to_response('under_maintenance.html',
RequestContext(request, context))
return render_to_response(template, RequestContext(request, context))
def show_embedded_map(request, project=None, template='map.html'):
    """Render the embeddable map page for *project* (or the active project).

    All project attributes fall back to None when the settings lookup fails.
    """
    try:
        if project:
            mp_settings = MarinePlannerSettings.objects.get(slug_name=project)
        else:
            mp_settings = MarinePlannerSettings.objects.get(active=True)
        project_name = mp_settings.project_name
        project_logo = mp_settings.project_logo
        try:
            # a non-URL logo value is treated as a MEDIA_URL-relative path
            if project_logo:
                url_validator = URLValidator(verify_exists=False)
                url_validator(project_logo)
        except ValidationError, e:
            project_logo = os.path.join(settings.MEDIA_URL, project_logo)
        project_home_page = mp_settings.project_home_page
    except:
        project_name = project_logo = project_home_page = None
    context = {
        'MEDIA_URL': settings.MEDIA_URL,
        'project_name': project_name,
        'project_logo': project_logo,
        'project_home_page': project_home_page
    }
    #context = {'MEDIA_URL': settings.MEDIA_URL}
    return render_to_response(template, RequestContext(request, context))
def show_mobile_map(request, project=None, template='mobile-map.html'):
    """Render the mobile map page with the project's initial viewport.

    Only latitude/longitude/zoom are used by the template context; they fall
    back to None when the settings lookup fails.
    """
    try:
        if project:
            mp_settings = MarinePlannerSettings.objects.get(slug_name=project)
        else:
            mp_settings = MarinePlannerSettings.objects.get(active=True)
        latitude = mp_settings.latitude
        longitude = mp_settings.longitude
        zoom = mp_settings.zoom
    except:
        # BUGFIX: the previous except branch only reset project_name/logo/
        # home_page, leaving latitude/longitude/zoom unbound and raising a
        # NameError when the context dict was built below.
        latitude = longitude = zoom = None
    context = {
        'MEDIA_URL': settings.MEDIA_URL,
        'latitude': latitude,
        'longitude': longitude,
        'zoom': zoom
    }
    return render_to_response(template, RequestContext(request, context))
def get_sharing_groups(request):
    """Return the user's sharing groups (name, slug, sorted member names) as JSON."""
    from madrona.features import user_sharing_groups
    from functools import cmp_to_key
    import locale
    # NOTE(review): setlocale mutates process-wide state on every request —
    # confirm this is acceptable in this deployment.
    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
    json = []
    sharing_groups = user_sharing_groups(request.user)
    for group in sharing_groups:
        members = []
        for user in group.user_set.all():
            # prefer "First Last" when both parts are non-blank
            if user.first_name.replace(' ', '') != '' and user.last_name.replace(' ', '') != '':
                members.append(user.first_name + ' ' + user.last_name)
            else:
                members.append(user.username)
        sorted_members = sorted(members, key=cmp_to_key(locale.strcoll))
        json.append({
            'group_name': group.name,
            'group_slug': slugify(group.name)+'-sharing',
            'members': sorted_members
        })
    return HttpResponse(dumps(json))
'''
'''
def share_bookmark(request):
    """Replace the sharing-group list of a bookmark with the POSTed groups.

    Expects POST params 'groups[]' (group names) and 'bookmark' (feature uid).
    Returns 200 on success, or the permission response when not viewable.
    """
    group_names = request.POST.getlist('groups[]')
    bookmark_uid = request.POST['bookmark']
    bookmark = get_feature_by_uid(bookmark_uid)
    viewable, response = bookmark.is_viewable(request.user)
    if not viewable:
        return response
    #remove previously shared with groups, before sharing with new list
    bookmark.share_with(None)
    groups = []
    for group_name in group_names:
        groups.append(Group.objects.get(name=group_name))
    bookmark.share_with(groups, append=False)
    return HttpResponse("", status=200)
'''
'''
def get_bookmarks(request):
    """Sync client-side bookmarks to the server and return the merged list.

    Unknown (user, name, hash) triples from the POST payload are persisted;
    the response lists the user's own bookmarks plus ones shared with them.
    Returns 200 with JSON, or 304 on any failure.
    """
    #sync the client-side bookmarks with the server side bookmarks
    #update the server-side bookmarks and return the new list
    try:
        bookmark_dict = parser.parse(request.POST.urlencode())['bookmarks']
    except:
        bookmark_dict = {}
    try:
        #loop through the list from the client
        #if user, bm_name, and bm_state match then skip
        #otherwise, add to the db
        for key,bookmark in bookmark_dict.items():
            try:
                Bookmark.objects.get(user=request.user, name=bookmark['name'], url_hash=bookmark['hash'])
            except Bookmark.DoesNotExist:
                new_bookmark = Bookmark(user=request.user, name=bookmark['name'], url_hash=bookmark['hash'])
                new_bookmark.save()
            except:
                # malformed entry — skip it and keep processing the rest
                continue
        #grab all bookmarks belonging to this user
        #serialize bookmarks into 'name', 'hash' objects and return simplejson dump
        content = []
        bookmark_list = Bookmark.objects.filter(user=request.user)
        for bookmark in bookmark_list:
            sharing_groups = [group.name for group in bookmark.sharing_groups.all()]
            content.append({
                'uid': bookmark.uid,
                'name': bookmark.name,
                'hash': bookmark.url_hash,
                'sharing_groups': sharing_groups
            })
        shared_bookmarks = Bookmark.objects.shared_with_user(request.user)
        for bookmark in shared_bookmarks:
            if bookmark not in bookmark_list:
                username = bookmark.user.username
                actual_name = bookmark.user.first_name + ' ' + bookmark.user.last_name
                content.append({
                    'uid': bookmark.uid,
                    'name': bookmark.name,
                    'hash': bookmark.url_hash,
                    'shared': True,
                    'shared_by_username': username,
                    'shared_by_name': actual_name
                })
        return HttpResponse(simplejson.dumps(content), content_type="application/json", status=200)
    except:
        return HttpResponse(status=304)
def remove_bookmark(request):
    """Delete the bookmark identified by POST 'uid'.

    Returns 200 on success, the permission response when not viewable,
    or 304 on any other failure.
    """
    try:
        bookmark_uid = request.POST['uid']
        bookmark = get_feature_by_uid(bookmark_uid)
        viewable, response = bookmark.is_viewable(request.user)
        if not viewable:
            return response
        bookmark.delete()
        return HttpResponse(status=200)
    except:
        return HttpResponse(status=304)
def add_bookmark(request):
    """Create a bookmark from POST 'name'/'hash' for the current user.

    Returns 200 with the new bookmark serialized as JSON, 304 on failure.
    """
    try:
        bookmark = Bookmark(user=request.user, name=request.POST.get('name'), url_hash=request.POST.get('hash'))
        bookmark.save()
        sharing_groups = [group.name for group in bookmark.sharing_groups.all()]
        content = []
        content.append({
            'uid': bookmark.uid,
            'name': bookmark.name,
            'hash': bookmark.url_hash,
            'sharing_groups': sharing_groups
        })
        print 'returning content'
        return HttpResponse(simplejson.dumps(content), content_type="application/json", status=200)
    except:
        return HttpResponse(status=304)
| Ecotrust/PEW-EFH | mp/visualize/views.py | Python | apache-2.0 | 11,227 |
#!/usr/bin/python
# -*- coding: utf8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Herramienta para procesar y consultar el Padrón Unico de Contribuyentes AFIP"
# Documentación e información adicional:
# http://www.sistemasagiles.com.ar/trac/wiki/PadronContribuyentesAFIP
__author__ = "Mariano Reingart <[email protected]>"
__copyright__ = "Copyright (C) 2014-2016 Mariano Reingart"
__license__ = "GPL 3.0"
__version__ = "1.08a"
import csv
import json
import os
import shelve
import socket
import sqlite3
import urllib2
import zipfile
from email.utils import formatdate
import sys
import warnings
from utils import leer, escribir, N, A, I, get_install_dir, safe_console, \
inicializar_y_capturar_excepciones_simple, WebClient, norm, \
exception_info
# format and location of the full tax-condition file per RG 1817
FORMATO = [
    ("nro_doc", 11, N, ""),
    ("denominacion", 30, A, ""),
    ("imp_ganancias", 2, A, "'NI', 'AC','EX', 'NC'"),
    ("imp_iva", 2, A, "'NI' , 'AC','EX','NA','XN','AN'"),
    ("monotributo", 2, A, "'NI', 'Codigo categoria tributaria'"),
    ("integrante_soc", 1, A, "'N' , 'S'"),
    ("empleador", 1, A, "'N', 'S'"),
    ("actividad_monotributo", 2, A, ""),
    ("tipo_doc", 2, N, "80: CUIT, 96: DNI, etc."),
    ("cat_iva", 2, N, "1: RI, 4: EX, 5: CF, 6: MT, etc"),
    ("email", 250, A, ""),
    ]
# Constant mappings:
PROVINCIAS = {0: 'CIUDAD AUTONOMA BUENOS AIRES', 1: 'BUENOS AIRES',
    2: 'CATAMARCA', 3: 'CORDOBA', 4: 'CORRIENTES', 5: 'ENTRE RIOS', 6: 'JUJUY',
    7: 'MENDOZA', 8: 'LA RIOJA', 9: 'SALTA', 10: 'SAN JUAN', 11: 'SAN LUIS',
    12: 'SANTA FE', 13: 'SANTIAGO DEL ESTERO', 14: 'TUCUMAN', 16: 'CHACO',
    17: 'CHUBUT', 18: 'FORMOSA', 19: 'MISIONES', 20: 'NEUQUEN', 21: 'LA PAMPA',
    22: 'RIO NEGRO', 23: 'SANTA CRUZ', 24: 'TIERRA DEL FUEGO'}
TIPO_CLAVE = {'CUIT': 80, 'CUIL': 86, 'CDI': 86, 'DNI': 96, 'Otro': 99}
DEBUG = True
URL = "http://www.afip.gob.ar/genericos/cInscripcion/archivos/apellidoNombreDenominacion.zip"
URL_API = "https://soa.afip.gob.ar/"
class PadronAFIP():
    "Interface to query tax registration status (Constancia de Inscripcion)"
    # names exposed through the COM/automation registration below
    _public_methods_ = ['Buscar', 'Descargar', 'Procesar', 'Guardar',
                        'ConsultarDomicilios', 'Consultar', 'Conectar',
                        'DescargarConstancia', 'MostrarPDF', 'BuscarCUIT',
                        "ObtenerTablaParametros",
                        ]
    _public_attrs_ = ['InstallDir', 'Traceback', 'Excepcion', 'Version',
                    'cuit', 'dni', 'denominacion', 'imp_ganancias', 'imp_iva',
                    'monotributo', 'integrante_soc', 'empleador',
                    'actividad_monotributo', 'cat_iva', 'domicilios',
                    'tipo_doc', 'nro_doc', 'LanzarExcepciones',
                    'tipo_persona', 'estado', 'impuestos', 'actividades',
                    'direccion', 'localidad', 'provincia', 'cod_postal',
                    'data', 'response',
                    ]
    _readonly_attrs_ = _public_attrs_[3:-1]
    _reg_progid_ = "PadronAFIP"
    _reg_clsid_ = "{6206DF5E-3EEF-47E9-A532-CD81EBBAF3AA}"
    def __init__(self):
        """Open (or create) the local sqlite padron database and reset state."""
        self.db_path = os.path.join(self.InstallDir, "padron.db")
        self.Version = __version__
        # Open the database
        self.db = sqlite3.connect(self.db_path)
        self.db.row_factory = sqlite3.Row
        self.cursor = self.db.cursor()
        self.LanzarExcepciones = False
        self.inicializar()
        self.client = None
    def inicializar(self):
        """Reset all per-query result attributes to their empty defaults."""
        self.Excepcion = self.Traceback = ""
        self.cuit = self.dni = 0
        self.tipo_persona = ""  # FISICA or JURIDICA
        self.tipo_doc = 0
        self.estado = ""  # e.g. ACTIVO
        self.denominacion = ""
        self.direccion = self.localidad = self.provincia = self.cod_postal = ""
        self.domicilios = []
        self.impuestos = []
        self.actividades = []
        self.imp_iva = self.empleador = self.integrante_soc = self.cat_iva = ""
        self.monotributo = self.actividad_monotributo = ""
        self.data = {}
        self.response = ""
    @inicializar_y_capturar_excepciones_simple
    def Conectar(self, url=URL_API, proxy="", wrapper=None, cacert=None, trace=False):
        """Create the REST client used by the online query methods."""
        self.client = WebClient(location=url, trace=trace, cacert=cacert)
        self.client.method = "GET"  # default RESTful method
        self.client.enctype = None  # do not send a request body
        return True
    @inicializar_y_capturar_excepciones_simple
    def Descargar(self, url=URL, filename="padron.txt", proxy=None):
        "Download the padron file from AFIP; returns 200, or 304 if not modified"
        proxies = {}
        if proxy:
            proxies['http'] = proxy
            proxies['https'] = proxy
        # NOTE(review): proxy_handler is built but never installed in an
        # opener, so the proxy setting has no effect — confirm intent.
        proxy_handler = urllib2.ProxyHandler(proxies)
        print "Abriendo URL %s ..." % url
        req = urllib2.Request(url)
        if os.path.exists(filename):
            # conditional GET: only re-download when the remote file changed
            http_date = formatdate(timeval=os.path.getmtime(filename),
                                   localtime=False, usegmt=True)
            req.add_header('If-Modified-Since', http_date)
        try:
            web = urllib2.urlopen(req)
        except urllib2.HTTPError, e:
            if e.code == 304:
                print "No modificado desde", http_date
                return 304
            else:
                raise
        # read request metadata:
        meta = web.info()
        lenght = float(meta['Content-Length'])
        date = meta['Last-Modified']
        tmp = open(filename + ".zip", "wb")
        print "Guardando"
        size = 0
        p0 = None
        while True:
            # progress reporting in whole percent steps
            p = int(size / lenght * 100)
            if p0 is None or p>p0:
                print "Leyendo ... %0d %%" % p
                p0 = p
            data = web.read(1024*100)
            size = size + len(data)
            if not data:
                print "Descarga Terminada!"
                break
            tmp.write(data)
        print "Abriendo ZIP..."
        tmp.close()
        web.close()
        uf = open(filename + ".zip", "rb")
        zf = zipfile.ZipFile(uf)
        for fn in zf.namelist():
            # extract every member over the same target filename
            print "descomprimiendo", fn
            tf = open(filename, "wb")
            tf.write(zf.read(fn))
            tf.close()
        return 200
    @inicializar_y_capturar_excepciones_simple
    def Procesar(self, filename="padron.txt", borrar=False):
        "Parse the downloaded padron file and build the internal sqlite database"
        f = open(filename, "r")
        keys = [k for k, l, t, d in FORMATO]
        # conversion to a csv spreadsheet (unused — guarded by `if False`)
        if False and not os.path.exists("padron.csv"):
            csvfile = open('padron.csv', 'wb')
            import csv
            wr = csv.writer(csvfile, delimiter=',',
                            quotechar='"', quoting=csv.QUOTE_MINIMAL)
            for i, l in enumerate(f):
                if i % 100000 == 0:
                    print "Progreso: %d registros" % i
                r = leer(l, FORMATO)
                row = [r[k] for k in keys]
                wr.writerow(row)
            csvfile.close()
            f.seek(0)
        if os.path.exists(self.db_path) and borrar:
            os.remove(self.db_path)
        if True:
            # NOTE(review): `db = db =` is a harmless duplicated assignment
            db = db = sqlite3.connect(self.db_path)
            c = db.cursor()
            c.execute("CREATE TABLE padron ("
                      "nro_doc INTEGER, "
                      "denominacion VARCHAR(30), "
                      "imp_ganancias VARCHAR(2), "
                      "imp_iva VARCHAR(2), "
                      "monotributo VARCHAR(1), "
                      "integrante_soc VARCHAR(1), "
                      "empleador VARCHAR(1), "
                      "actividad_monotributo VARCHAR(2), "
                      "tipo_doc INTEGER, "
                      "cat_iva INTEGER DEFAULT NULL, "
                      "email VARCHAR(250), "
                      "PRIMARY KEY (tipo_doc, nro_doc)"
                      ");")
            c.execute("CREATE TABLE domicilio ("
                      "id INTEGER PRIMARY KEY AUTOINCREMENT, "
                      "tipo_doc INTEGER, "
                      "nro_doc INTEGER, "
                      "direccion TEXT, "
                      "FOREIGN KEY (tipo_doc, nro_doc) REFERENCES padron "
                      ");")
        # import the data into the sqlite database
        for i, l in enumerate(f):
            if i % 10000 == 0: print i
            l = l.strip("\x00")
            r = leer(l, FORMATO)
            params = [r[k] for k in keys]
            params[8] = 80 # add tipo_doc = CUIT
            params[9] = None # cat_iva does not come from AFIP
            placeholders = ", ".join(["?"] * len(params))
            c.execute("INSERT INTO padron VALUES (%s)" % placeholders,
                      params)
        db.commit()
        c.close()
        db.close()
    @inicializar_y_capturar_excepciones_simple
    def Buscar(self, nro_doc, tipo_doc=80):
        "Return True when found, populating instance attributes with the data"
        # cuit: the taxpayer's unique tax identification code
        # (without dashes)
        self.cursor.execute("SELECT * FROM padron WHERE "
                            " tipo_doc=? AND nro_doc=?", [tipo_doc, nro_doc])
        row = self.cursor.fetchone()
        for key in [k for k, l, t, d in FORMATO]:
            if row:
                val = row[key]
                if not isinstance(val, basestring):
                    val = str(row[key])
                setattr(self, key, val)
            else:
                setattr(self, key, '')
        # NOTE(review): the loop above stores tipo_doc/nro_doc as strings,
        # so these integer comparisons look like they can never match —
        # verify against real rows.
        if self.tipo_doc == 80:
            self.cuit = self.nro_doc
        elif self.tipo_doc == 96:
            self.dni = self.nro_doc
        # determine IVA category (tentative)
        try:
            cat_iva = int(self.cat_iva)
        except ValueError:
            cat_iva = None
        if cat_iva:
            pass
        elif self.imp_iva in ('AC', 'S'):
            self.cat_iva = 1 # RI
        elif self.imp_iva == 'EX':
            self.cat_iva = 4 # EX
        elif self.monotributo:
            self.cat_iva = 6 # MT
        else:
            self.cat_iva = 5 # CF
        return True if row else False
    @inicializar_y_capturar_excepciones_simple
    def ConsultarDomicilios(self, nro_doc, tipo_doc=80, cat_iva=None):
        "Look up the addresses; returns the count and fills self.domicilios"
        self.cursor.execute("SELECT direccion FROM domicilio WHERE "
                            " tipo_doc=? AND nro_doc=? ORDER BY id ",
                            [tipo_doc, nro_doc])
        filas = self.cursor.fetchall()
        self.domicilios = [fila['direccion'] for fila in filas]
        return len(filas)
    @inicializar_y_capturar_excepciones_simple
    def BuscarCUIT(self, denominacion, limite=10):
        "Full-text search by name; returns a list of doc type/number dicts"
        sql = ("SELECT tipo_doc, nro_doc, denominacion "
               "FROM padron_fts WHERE denominacion MATCH ? "
               "LIMIT ?")
        # spaces become wildcards so partial words still match
        denominacion = denominacion.replace(" ", "*")
        self.cursor.execute(sql, ["*{}*".format(denominacion), limite])
        filas = self.cursor.fetchall()
        return [dict(fila) for fila in filas]
@inicializar_y_capturar_excepciones_simple
def Guardar(self, tipo_doc, nro_doc, denominacion, cat_iva, direccion,
email, imp_ganancias='NI', imp_iva='NI', monotributo='NI',
integrante_soc='N', empleador='N'):
"Agregar o actualizar los datos del cliente"
if self.Buscar(nro_doc, tipo_doc):
sql = ("UPDATE padron SET denominacion=?, cat_iva=?, email=?, "
"imp_ganancias=?, imp_iva=?, monotributo=?, "
"integrante_soc=?, empleador=? "
"WHERE tipo_doc=? AND nro_doc=?")
params = [denominacion, cat_iva, email, imp_ganancias,
imp_iva, monotributo, integrante_soc, empleador,
tipo_doc, nro_doc]
else:
sql = ("INSERT INTO padron (tipo_doc, nro_doc, denominacion, "
"cat_iva, email, imp_ganancias, imp_iva, monotributo, "
"integrante_soc, empleador) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
params = [tipo_doc, nro_doc, denominacion, cat_iva, email,
imp_ganancias, imp_iva, monotributo,
integrante_soc, empleador]
self.cursor.execute(sql, params)
# agregar el domicilio solo si no existe:
if direccion:
self.cursor.execute("SELECT * FROM domicilio WHERE direccion=? "
"AND tipo_doc=? AND nro_doc=?",
[direccion, tipo_doc, nro_doc])
if self.cursor.rowcount < 0:
sql = ("INSERT INTO domicilio (nro_doc, tipo_doc, direccion)"
"VALUES (?, ?, ?)")
self.cursor.execute(sql, [nro_doc, tipo_doc, direccion])
self.db.commit()
return True
    @inicializar_y_capturar_excepciones_simple
    def Consultar(self, nro_doc):
        "Call AFIP's public API to fetch a person's data"
        n = 0
        # NOTE(review): the comment said 3 retries but the loop allows up
        # to 5 attempts (n goes 1..5) — confirm which is intended.
        while n <= 4:
            n += 1 # retry a few times
            try:
                if not self.client:
                    if DEBUG:
                        warnings.warn("reconectando intento [%d]..." % n)
                    self.Conectar()
                self.response = self.client("sr-padron", "v2", "persona", str(nro_doc))
            except Exception as e:
                # drop the client so the next attempt reconnects from scratch
                self.client = None
                ex = exception_info()
                self.Traceback = ex.get("tb", "")
                try:
                    self.Excepcion = norm(ex.get("msg", "").replace("\n", ""))
                except:
                    self.Excepcion = "<no disponible>"
                if DEBUG:
                    warnings.warn("Error %s [%d]" % (self.Excepcion, n))
            else:
                break
        else:
            # all attempts failed
            return False
        result = json.loads(self.response)
        if result['success']:
            data = result['data']
            # extract the taxpayer's general data:
            self.cuit = data["idPersona"]
            self.tipo_persona = data["tipoPersona"]
            self.tipo_doc = TIPO_CLAVE.get(data["tipoClave"])
            self.dni = data.get("numeroDocumento")
            self.estado = data.get("estadoClave")
            self.denominacion = data.get("nombre")
            # analyze the fiscal address
            domicilio = data.get("domicilioFiscal")
            if domicilio:
                self.direccion = domicilio.get("direccion", "")
                self.localidad = domicilio.get("localidad", "")  # not used in CABA
                self.provincia = PROVINCIAS.get(domicilio.get("idProvincia"), "")
                self.cod_postal = domicilio.get("codPostal")
            else:
                self.direccion = self.localidad = self.provincia = ""
                self.cod_postal = ""
            # backwards compatibility:
            self.domicilios = ["%s - %s (%s) - %s" % (
                self.direccion, self.localidad,
                self.cod_postal, self.provincia,) ]
            # analyze taxes:
            self.impuestos = data.get("impuestos", [])
            self.actividades = data.get("actividades", [])
            if 32 in self.impuestos:
                self.imp_iva = "EX"
            elif 33 in self.impuestos:
                self.imp_iva = "NI"
            elif 34 in self.impuestos:
                self.imp_iva = "NA"
            else:
                self.imp_iva = "S" if 30 in self.impuestos else "N"
            mt = data.get("categoriasMonotributo", {})
            self.monotributo = "S" if mt else "N"
            self.actividad_monotributo = "" # TODO: mt[0].get("idCategoria")
            self.integrante_soc = ""
            self.empleador = "S" if 301 in self.impuestos else "N"
            self.cat_iva = ""
            self.data = data
        else:
            error = result['error']
            self.Excepcion = error['mensaje']
        return True
    @inicializar_y_capturar_excepciones_simple
    def DescargarConstancia(self, nro_doc, filename="constancia.pdf"):
        "Call the API to download a registration certificate (PDF)"
        if not self.client:
            self.Conectar()
        self.response = self.client("sr-padron", "v1", "constancia", str(nro_doc))
        # a JSON body (starting with '{') signals an API error, not a PDF
        if self.response.startswith("{"):
            result = json.loads(self.response)
            assert not result["success"]
            self.Excepcion = result['error']['mensaje']
            return False
        else:
            with open(filename, "wb") as f:
                f.write(self.response)
            return True
    @inicializar_y_capturar_excepciones_simple
    def MostrarPDF(self, archivo, imprimir=False):
        """Open (or print) the given PDF with the platform's viewer."""
        if sys.platform.startswith(("linux2", 'java')):
            os.system("evince ""%s""" % archivo)
        else:
            # Windows: use the shell file association ('print' verb to print)
            operation = imprimir and "print" or ""
            os.startfile(archivo, operation)
        return True
    @inicializar_y_capturar_excepciones_simple
    def ObtenerTablaParametros(self, tipo_recurso, sep="||"):
        "Return an array of elements having an id and a description"
        if not self.client:
            self.Conectar()
        self.response = self.client("parametros", "v1", tipo_recurso)
        result = json.loads(self.response)
        ret = {}
        if result['success']:
            data = result['data']
            # build a dictionary from the returned data:
            key = [k for k in data[0].keys() if k.startswith("id")][0]
            val = [k for k in data[0].keys() if k.startswith("desc")][0]
            for it in data:
                ret[it[key]] = it[val]
            self.data = data
        else:
            error = result['error']
            self.Excepcion = error['mensaje']
        if sep:
            # "||id||description||" formatted rows, sorted by id
            return ["%s%%s%s%%s%s" % (sep, sep, sep) % it for it in sorted(ret.items())]
        else:
            return ret
# find the installation directory (global so it does not change if another dll is used)
INSTALL_DIR = PadronAFIP.InstallDir = get_install_dir()
if __name__ == "__main__":
    safe_console()
    if "--register" in sys.argv or "--unregister" in sys.argv:
        # COM server (de)registration on Windows
        import win32com.server.register
        win32com.server.register.UseCommandLine(PadronAFIP)
    else:
        padron = PadronAFIP()
        padron.LanzarExcepciones = True
        import time
        t0 = time.time()
        if "--descargar" in sys.argv:
            padron.Descargar()
        if "--procesar" in sys.argv:
            padron.Procesar(borrar='--borrar' in sys.argv)
        if "--parametros" in sys.argv:
            # dump every parameter table from the API
            import codecs, locale, traceback
            if sys.stdout.encoding is None:
                sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout,"replace");
                sys.stderr = codecs.getwriter(locale.getpreferredencoding())(sys.stderr,"replace");
            print "=== Impuestos ==="
            print u'\n'.join(padron.ObtenerTablaParametros("impuestos"))
            print "=== Conceptos ==="
            print u'\n'.join(padron.ObtenerTablaParametros("conceptos"))
            print "=== Actividades ==="
            print u'\n'.join(padron.ObtenerTablaParametros("actividades"))
            print "=== Caracterizaciones ==="
            print u'\n'.join(padron.ObtenerTablaParametros("caracterizaciones"))
            print "=== Categorias Monotributo ==="
            print u'\n'.join(padron.ObtenerTablaParametros("categoriasMonotributo"))
            print "=== Categorias Autonomos ==="
            print u'\n'.join(padron.ObtenerTablaParametros("categoriasAutonomo"))
        if '--csv' in sys.argv:
            # batch mode: query every CUIT in entrada.csv, write salida.csv
            csv_reader = csv.reader(open("entrada.csv", "rU"),
                                    dialect='excel', delimiter=",")
            csv_writer = csv.writer(open("salida.csv", "w"),
                                    dialect='excel', delimiter=",")
            encabezado = next(csv_reader)
            columnas = ["cuit", "denominacion", "estado", "direccion",
                        "localidad", "provincia", "cod_postal",
                        "impuestos", "actividades", "imp_iva",
                        "monotributo", "actividad_monotributo",
                        "empleador", "imp_ganancias", "integrante_soc"]
            csv_writer.writerow(columnas)
            for fila in csv_reader:
                cuit = (fila[0] if fila else "").replace("-", "")
                if cuit.isdigit():
                    if '--online' in sys.argv:
                        padron.Conectar(trace="--trace" in sys.argv)
                        print "Consultando AFIP online...", cuit,
                        ok = padron.Consultar(cuit)
                    else:
                        print "Consultando AFIP local...", cuit,
                        ok = padron.Buscar(cuit)
                    print 'ok' if ok else "error", padron.Excepcion
                    # the address may be Latin1-encoded, normalize it
                    csv_writer.writerow([norm(getattr(padron, campo, ""))
                                         for campo in columnas])
        elif "--reconex" in sys.argv:
            # reconnection stress test: time repeated online queries
            padron.Conectar(trace="--trace" in sys.argv)
            cuit = 20267565393
            for i in range(10000):
                t0 = time.time()
                ok = padron.Consultar(cuit)
                t1 = time.time()
                print("%2.4f" % (t1-t0))
        else:
            cuit = len(sys.argv)>1 and sys.argv[1] or "20267565393"
            # query a single cuit:
            if '--online' in sys.argv:
                padron.Conectar(trace="--trace" in sys.argv)
                print "Consultando AFIP online...",
                ok = padron.Consultar(cuit)
                print 'ok' if ok else "error", padron.Excepcion
                print "Denominacion:", padron.denominacion
                print "CUIT:", padron.cuit
                print "Tipo:", padron.tipo_persona, padron.tipo_doc, padron.dni
                print "Estado:", padron.estado
                print "Direccion:", padron.direccion
                print "Localidad:", padron.localidad
                print "Provincia:", padron.provincia
                print "Codigo Postal:", padron.cod_postal
                print "Impuestos:", padron.impuestos
                print "Actividades:", padron.actividades
                print "IVA", padron.imp_iva
                print "MT", padron.monotributo, padron.actividad_monotributo
                print "Empleador", padron.empleador
            elif '--constancia' in sys.argv:
                filename = sys.argv[2]
                print "Descargando constancia AFIP online...", cuit, filename
                ok = padron.DescargarConstancia(cuit, filename)
                print 'ok' if ok else "error", padron.Excepcion
                if '--mostrar' in sys.argv:
                    padron.MostrarPDF(archivo=filename,
                                      imprimir='--imprimir' in sys.argv)
            else:
                # offline lookup against the local sqlite database
                ok = padron.Buscar(cuit)
                if ok:
                    print "Denominacion:", padron.denominacion
                    print "IVA:", padron.imp_iva
                    print "Ganancias:", padron.imp_ganancias
                    print "Monotributo:", padron.monotributo
                    print "Integrante Soc.:", padron.integrante_soc
                    print "Empleador", padron.empleador
                    print "Actividad Monotributo:", padron.actividad_monotributo
                    print "Categoria IVA:", padron.cat_iva
                    padron.ConsultarDomicilios(cuit)
                    for dom in padron.domicilios:
                        print dom
                else:
                    print padron.Excepcion
                    print padron.Traceback
        t1 = time.time()
        if '--trace' in sys.argv:
            print "tiempo", t1 -t0
| reingart/pyafipws | padron.py | Python | gpl-3.0 | 25,098 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.garcia_2005 import (
GarciaEtAl2005SSlab,
GarciaEtAl2005SSlabVert,
)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
class GarciaEtAl2005SSlabTestCase(BaseGSIMTestCase):
    """Verification tests for GarciaEtAl2005SSlab against reference CSVs."""
    GSIM_CLASS = GarciaEtAl2005SSlab
    # Test data generated from Fortran implementation
    # provided by Daniel Garcia
    def test_mean(self):
        self.check('GA05/GA05SSlab_MEAN.csv', max_discrep_percentage=0.2)
    def test_std_total(self):
        self.check('GA05/GA05SSlab_STD_TOTAL.csv', max_discrep_percentage=0.1)
    def test_std_intra(self):
        self.check('GA05/GA05SSlab_STD_INTRA.csv', max_discrep_percentage=0.1)
    def test_std_inter(self):
        self.check('GA05/GA05SSlab_STD_INTER.csv', max_discrep_percentage=0.1)
class GarciaEtAl2005SSlabVertTestCase(BaseGSIMTestCase):
    """Verification tests for GarciaEtAl2005SSlabVert against reference CSVs."""
    GSIM_CLASS = GarciaEtAl2005SSlabVert
    # Test data generated from Fortran implementation
    # provided by Daniel Garcia
    def test_mean(self):
        self.check('GA05/GA05SSlabV_MEAN.csv', max_discrep_percentage=0.2)
    def test_std_total(self):
        self.check('GA05/GA05SSlabV_STD_TOTAL.csv', max_discrep_percentage=0.1)
    def test_std_intra(self):
        self.check('GA05/GA05SSlabV_STD_INTRA.csv', max_discrep_percentage=0.1)
    def test_std_inter(self):
        self.check('GA05/GA05SSlabV_STD_INTER.csv', max_discrep_percentage=0.1)
| gem/oq-hazardlib | openquake/hazardlib/tests/gsim/garcia_2005_test.py | Python | agpl-3.0 | 2,163 |
# Deliberately unbounded memory growth: this appears to be judge test data
# used to trigger a Memory Limit Exceeded verdict — do not "fix" the loop.
a = {}
i = 0
while True :
    a[i] = '1' * 100
    i = i + 1
| watashi/zoj | judge_client/client/testdata/mle.py | Python | gpl-3.0 | 61 |
from google.appengine.ext import db
from google.appengine.api import memcache
class Stats(db.Model):
    """Datastore model for review-time statistics (stored under key 'stats')."""
    reviewTimeUnit = db.IntegerProperty(required=True) #milliseconds
    reviewTimeUnitWeight = db.IntegerProperty(default=1)
def setDefaultStats():
    """Reset the singleton stats entity to its defaults, persist and return it."""
    defaults = getStats()
    defaults.reviewTimeUnit = 30 * 24 * 3600 * 1000  # 30 days in milliseconds
    defaults.reviewTimeUnitWeight = 0
    setStats(defaults)
    return defaults
def createStats():
    """Create the singleton stats entity, store it and prime the memcache copy."""
    created = Stats(key_name='stats', reviewTimeUnit=30 * 24 * 3600 * 1000)
    created.put()
    memcache.set('stats', db.model_to_protobuf(created))
    return created
def getStats():
    """Return the singleton stats entity, preferring the memcache copy.

    Falls back to the datastore (re-priming the cache) and finally to
    creating a fresh entity when none exists yet.
    """
    cached = memcache.get('stats')
    if cached is not None:
        return db.model_from_protobuf(cached)
    stored = Stats.get_by_key_name('stats')
    if stored is None:
        return createStats()
    memcache.set('stats', db.model_to_protobuf(stored))
    return stored
def setStats(stats):
    """Persist the given stats entity and refresh its memcache copy."""
    stats.put()
    memcache.set('stats', db.model_to_protobuf(stats))
def getReviewTimeUnit():
    """Return the configured review time unit in milliseconds."""
    return getStats().reviewTimeUnit
def setReviewTimeUnit(value):
    """Set the review time unit (milliseconds) and persist the change."""
    current = getStats()
    current.reviewTimeUnit = value
    setStats(current)
| wighawag/ubh | app/stats/model.py | Python | agpl-3.0 | 1,324 |
import os
import re
import random
import datetime
class AlphaSubstBaseMLBootstrap:
    "Runs PAML's baseml, bootstraps sequence alignments, and parses baseml output files."
    def __init__(self, ResultsFolder):
        "Remember the results folder and initialise the holders that ScoreBaseML fills in."
        self.ResultsFolder = ResultsFolder
        # Outputs parsed from baseml (populated by ScoreBaseML):
        self.BaseMLBranchDesc = []  # branch labels, in baseml's own output order
        self.TransRatio = ""  # Ts/Tv ratio (set for REV/UNREST models only)
        self.RateMatrix = []  # 4x4 substitution rate matrix (REV/UNREST only)
        self.BaseFreq = []  # average base frequencies
        self.RateParameters = []
        self.RateParameterHeaders = []
        self.GotExtraBaseML = 0  # flag: extra baseml outputs already captured once
    def RunBaseML(self,BaseMLLoc,UserRandomKey,GalaxyLocation):
        "This function will execute baseml and return the results for the branch description"
        # NOTE(review): fire-and-forget — baseml's exit status is never checked.
        op = os.popen(BaseMLLoc + "baseml " + GalaxyLocation + "tools/mdea/BaseMLWork/" + str(UserRandomKey) + "-baseml.ctl")
    def FinalCleanUp(self,BaseMLLocation,GalaxyLocation,UserRandomKey):
        "This function will clear the tree and the baseml.ctl files"
        op = os.remove(GalaxyLocation + 'tools/mdea/BaseMLWork/' + str(UserRandomKey) + "-baseml.ctl")
        op = os.remove(GalaxyLocation + 'tools/mdea/BaseMLWork/' + str(UserRandomKey) + "-tmp.tree")
        self.DeleteOldFiles(GalaxyLocation,3)
    def DeleteOldFiles(self,DirectoryToSearch,DaysOld):
        "This program will search the work directory and delete files that are older than 3 days"
        # Work files are named YYYY-MM-DD-<pid>-baseml/tmp..., so the date can
        # be recovered from the file name itself.
        CorrectFormatSearch = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]+-(baseml|tmp)')
        WorkDirectory = DirectoryToSearch + 'tools/mdea/BaseMLWork/'
        TodaysDateValue = int(self.DateToDays(str(datetime.date.today())))
        DateDifference = 0
        #Open the directory and return the file names
        for FileName in os.listdir(WorkDirectory):
            if CorrectFormatSearch.search(FileName):
                FilesDateValue = int(self.DateToDays(FileName))
                DateDifference = TodaysDateValue - FilesDateValue
                if DateDifference > int(DaysOld):
                    op = os.remove(WorkDirectory + str(FileName))
    def MonthToDays(self,MonthValue):
        "This returns how many days have passed in the month - no leap year support as yet"
        DaysInMonths = [31,28,31,30,31,30,31,31,30,31,30,31]
        MonthDays = 0
        for MonthIndex in range(0,MonthValue - 1):
            MonthDays += int(DaysInMonths[MonthIndex])
        return MonthDays
    def DateToDays(self,DateString):
        "Convert a 'YYYY-MM-DD...' string to an approximate day count for age comparison."
        # NOTE(review): only the last two year digits are used, and leap years
        # are ignored — fine for the 3-day age check, not for real date math.
        DateSplitter = re.compile("-")
        aDateParts = DateSplitter.split(DateString)
        Years = int(aDateParts[0][2:]) * 365
        Months = int(self.MonthToDays(int(aDateParts[1])))
        Days = int(aDateParts[2])
        return Years + Months + Days
    def ReturnBaseMLFile(self,UserRandomKey,GalaxyLocation):
        "This function will return the contents of the baseml output file, enclosed in a textarea"
        FileResults = ""
        BaseMLOut = open(GalaxyLocation + 'tools/mdea/BaseMLWork/' + str(UserRandomKey) + "-tmp.out")
        FileResults = BaseMLOut.read()
        return FileResults
    def ScoreBaseML(self,BaseMLLoc,UserRandomKey,BranchDescriptions,GalaxyLocation,GetSE,DoExtraBaseML,SubstModel):
        "This function will read tmp.out and set an array of the scores - return a 1 if it was successful - 0 otherwise"
        # Parses baseml's free-text tmp.out line by line. The regexes below
        # anchor on the distinctive headings baseml prints before each result
        # section; the value of interest is usually on the FOLLOWING line.
        SuccessfulRun = 0
        SubstModel = int(SubstModel)
        ScoreLineCheck = re.compile('[0-99]\.\.[0-99]')
        SESearch = re.compile('SEs for parameters')
        BaseFreqSearch = re.compile('^Average')
        KappaSearch = re.compile('^Parameters \(kappa\)')
        RateParameterSearch = re.compile('Rate parameters')
        ThreeSpaceSplitter = re.compile('   ')
        TwoSpaceSplitter = re.compile('  ')
        OneSpaceSplitter = re.compile(" ")
        SpaceSplitter = re.compile("\s")
        CommaJoin = re.compile(",")
        NewLineSplitter = re.compile('\n')
        TransSearch = re.compile('Ts\/Tv');
        EqualSplit = re.compile('=')
        DoExtraBaseML = int(DoExtraBaseML)
        TransParts = []
        RateParts = []
        TransRatio = 0
        FileContents = []
        BranchNameArray = []
        BranchScoreArray = []
        BaseMLScoreResults = []
        FinalBranchEntry = []
        SEResults = []
        BaseMLOut = open(GalaxyLocation + 'tools/mdea/BaseMLWork/' + str(UserRandomKey) + "-tmp.out")
        FileContents = NewLineSplitter.split(BaseMLOut.read())
        for FileIndex in range(0,len(FileContents)):
            # Branch-score section: names like "6..7" on one line, the scores
            # on the next; collapse runs of spaces before splitting.
            if ScoreLineCheck.search(FileContents[FileIndex]):
                FileContents[FileIndex] = FileContents[FileIndex].strip()
                FileContents[FileIndex+1] = FileContents[FileIndex+1].strip()
                BranchNameArray = ThreeSpaceSplitter.split(FileContents[FileIndex])
                while TwoSpaceSplitter.search(FileContents[FileIndex+1]):
                    FileContents[FileIndex+1] = TwoSpaceSplitter.sub(" ", FileContents[FileIndex+1])
                BranchScoreArray = OneSpaceSplitter.split(FileContents[FileIndex+1])
                if int(GetSE) == 0 and DoExtraBaseML == 0:
                    break
            if SESearch.search(FileContents[FileIndex]):
                FileContents[FileIndex+1] = FileContents[FileIndex+1].strip()
                while TwoSpaceSplitter.search(FileContents[FileIndex+1]):
                    FileContents[FileIndex+1] = TwoSpaceSplitter.sub(" ", FileContents[FileIndex+1])
                SEResults = OneSpaceSplitter.split(FileContents[FileIndex+1])
                if DoExtraBaseML == 0:
                    break
            if DoExtraBaseML == 1 and self.GotExtraBaseML == 0:
                if BaseFreqSearch.search(FileContents[FileIndex]):
                    BaseFreqLine = FileContents[FileIndex][10:]
                    self.BaseFreq = SpaceSplitter.split(BaseFreqLine.strip())
                if KappaSearch.search(FileContents[FileIndex]):
                    # Advance to the value line for this iteration only
                    # (range() resets FileIndex on the next pass).
                    FileIndex += 1
                    KappaLine = FileContents[FileIndex].strip()
                    if SubstModel != 6:
                        self.RateParameters = [KappaLine]
                        self.RateParameterHeaders = ['Kappa']
                    else:
                        KappaValues = TwoSpaceSplitter.split(KappaLine)
                        self.RateParameters = [KappaValues[0],KappaValues[1]]
                        self.RateParameterHeaders = ['Kappa1','Kappa2']
                if SubstModel == 7 or SubstModel == 8: #Rate Matrix for REV and UNREST
                    if TransSearch.search(FileContents[FileIndex]):
                        TransParts = EqualSplit.split(FileContents[FileIndex])
                        TransRatio = TransParts[1].strip()
                        self.TransRatio = float(TransRatio)
                        #Next 4 lines are the rate matrix data
                        for RateLineIndex in range(0,4):
                            self.RateMatrix.append([])
                            RateLine = FileContents[FileIndex+RateLineIndex+1]
                            RateLine = RateLine.strip()
                            RateParts = ThreeSpaceSplitter.split(RateLine)
                            for RatePartIndex in range(0,4):
                                RateParts[RatePartIndex] = RateParts[RatePartIndex].strip()
                                self.RateMatrix[RateLineIndex].append(float(RateParts[RatePartIndex]))
                    if RateParameterSearch.search(FileContents[FileIndex]):
                        RateParameterLine = FileContents[FileIndex][17:]
                        RateParameterLine = RateParameterLine.strip()
                        self.RateParameters = TwoSpaceSplitter.split(RateParameterLine)
                        if SubstModel == 7: self.RateParameterHeaders = ['A','B','C','D','E']
                        elif SubstModel == 8: self.RateParameterHeaders = []
            else:
                # Extra outputs not requested (or already captured): blank the
                # model-specific holders for the simpler substitution models.
                self.RateMatrix = ""
                if SubstModel == 0 or SubstModel == 2:
                    self.RateParameters = ""
                    self.RateParameterHeaders = ""
        BaseMLOut.close()
        #Get BaseML ordered branch descriptions - if they aren't already present
        if len(self.BaseMLBranchDesc) == 0:
            for Branch in BranchNameArray:
                Branch = OneSpaceSplitter.sub("",Branch)
                self.BaseMLBranchDesc.append(Branch)
        if len(BranchNameArray) != 0:
            SuccessfulRun = 1
            self.BaseMLScores = BranchScoreArray
            self.SEScores = SEResults
        else:
            SuccessfulRun = 0
            self.BaseMLScores = []
            self.SEScores = []
        self.CleanData(UserRandomKey,GalaxyLocation + "tools/mdea/BaseMLWork/")
        return SuccessfulRun
    def CleanData(self,UserRandomKey,WorkDir):
        "This function will remove the tmp.out and tmp.seq files"
        op = os.remove(WorkDir + str(UserRandomKey) + "-tmp.out")
        op = os.remove(WorkDir + str(UserRandomKey) + "-tmp.seq")
    def WriteDBSAlignment(self,SequenceLength,SequenceCount,SequenceData,UserRandomKey,DoBootStrap):
        "This function will bootstrap and write a tmp.seq file for baseML"
        if str(DoBootStrap) == "1":
            StrappedSequence = self.DoStrap(SequenceData,SequenceLength,SequenceCount)
        else:
            # No bootstrap: emit the alignment verbatim under a baseml header.
            StrappedSequence = " " + str(SequenceCount) + " " + str(SequenceLength) + "\n"
            for Index in range(0,len(SequenceData)):
                StrappedSequence += ">Sequence" + str(Index + 1) + "\n"
                StrappedSequence += str(SequenceData[Index]) + "\n"
        #Check Strapped sequence - for the presence of all nucleotides
        self.WriteFile("tools/mdea/BaseMLWork/" + str(UserRandomKey) + "-tmp.seq",StrappedSequence)
    def StrapSequence(self,StartSeqArray,SequenceLength,SequenceCount,UserRandomKey,DoBootStrap):
        "This will create a sequence replica from a starting Sequence (StartingSequence) - and write it as a tmp.out to the baseml work directory"
        SequenceLine = ""
        Splitter = re.compile('\n')
        #Bootstrap and return
        if str(DoBootStrap) == "1":
            StrappedSequence = self.DoStrap(StartSeqArray,SequenceLength,SequenceCount)
        else:
            StrappedSequence = " " + str(SequenceCount) + " " + str(SequenceLength) + "\n" #BaseML Style header
            for BlankArrayIndex in range(0,int(SequenceCount)):
                StrappedSequence += ">Sequence" + str(BlankArrayIndex + 1) + "\n"
                StrappedSequence += str(StartSeqArray[BlankArrayIndex]) + "\n"
        self.WriteFile("tools/mdea/BaseMLWork/" + str(UserRandomKey) + "-tmp.seq",StrappedSequence)
    def DoStrap(self,Sequences,SequenceLength,SequenceCount):
        "Bootstraps a sequence array"
        # Classic bootstrap: sample alignment columns with replacement, taking
        # the SAME column from every sequence so the alignment stays aligned.
        FinalSequenceArray = []
        FinalSeqFile = ""
        for BlankArrayIndex in range(0,int(SequenceCount)):
            FinalSequenceArray.append('') #Initialize a blank array
        #Bootstrap the sequences
        for SequenceLengthIndex in range(0,int(SequenceLength)):
            SamplePosition = random.randrange(0,int(SequenceLength),1)
            for SequenceCountIndex in range(0,int(SequenceCount)):
                FinalSequenceArray[SequenceCountIndex] += Sequences[SequenceCountIndex][SamplePosition]
        #Assemble the replica and return to caller
        FinalSeqFile = " " + str(SequenceCount) + " " + str(SequenceLength) + "\n" #BaseML Style header
        for BlankArrayIndex in range(0,int(SequenceCount)):
            FinalSeqFile += ">Sequence" + str(BlankArrayIndex + 1) + "\n"
            FinalSeqFile += str(FinalSequenceArray[BlankArrayIndex][0:]) + "\n"
        return FinalSeqFile
    def WriteFile(self,FileName,Data):
        "This function will write the data passed to a file on the file system"
        # NOTE(review): file() is the Python 2 builtin; this module is Py2-only.
        TargetFile = file(FileName, "w")
        TargetFile.write(Data)
        TargetFile.close()
| jmchilton/galaxy-central | tools/mdea/AlphaSubstBaseMLBootstrap.py | Python | mit | 11,958 |
#!/usr/bin/python
import subprocess
import os
import shutil
import pty
# Experimental Python 2 script: drive a child process through a pseudo-terminal
# so its output can be read back unbuffered.
master, slave = pty.openpty()
# NOTE(review): ('stdin1.py') is just a parenthesised STRING, not a tuple
# (no trailing comma) — it only works because shell=True is used below.
args = ('stdin1.py')
# popen = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd='.')
# http://stackoverflow.com/questions/5411780/python-run-a-daemon-sub-process-read-stdout/5413588#5413588
# not working
popen = subprocess.Popen(args, shell=True, stdin=subprocess.PIPE, stdout=slave, stderr=slave, close_fds=True, cwd='.')
# Wrap the master side of the pty as a file object to read the child's output.
stdout = os.fdopen(master)
# set the O_NONBLOCK flag of p.stdout file descriptor:
# flags = fcntl(popen1.stdout, F_GETFL) # get current popen1.stdout flags
# fcntl(popen1.stdout, F_SETFL, flags | O_NONBLOCK)
popen.stdin.write("this is line 0\n")
# line = popen.stdout.readline()
# line = stdout.readline()
# print "line = %s" % line
# communicate() feeds the remaining input and waits for the child to exit;
# out is None here because stdout was redirected to the pty slave, not a PIPE.
out, err = popen.communicate(input = "this is line 1\nthis is line 2\n\d")
if err != None:
    print 'errput = %s' % str(err)
print "output = %s" % str(out)
out2 = stdout.read()
# NOTE(review): this reprints `out`; `out2` (the pty contents) is never shown.
print "output = %s" % str(out)
subprocess.call(['echo', ''])
| jtraver/dev | python/subprocess/write2.py | Python | mit | 1,052 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration: creates the
    cmsplugin_hoverimageplugin table. The frozen ``models`` dict below is
    South's snapshot of the ORM state and must not be edited by hand."""
    def forwards(self, orm):
        # Adding model 'HoverImagePlugin'
        db.create_table('cmsplugin_hoverimageplugin', (
            ('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
            ('hover', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
            ('url', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('page_link', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Page'], null=True, blank=True)),
            ('alt', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('longdesc', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ))
        db.send_create_signal('cmsplugin_hoverimage', ['HoverImagePlugin'])
    def backwards(self, orm):
        # Deleting model 'HoverImagePlugin'
        db.delete_table('cmsplugin_hoverimageplugin')
    # Frozen ORM snapshot used by South to build the `orm` object above.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 3, 8, 0, 0)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.page': {
            'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
            'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
            'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
            'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
            'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
            'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'cmsplugin_hoverimage.hoverimageplugin': {
            'Meta': {'object_name': 'HoverImagePlugin', 'db_table': "'cmsplugin_hoverimageplugin'", '_ormbases': ['cms.CMSPlugin']},
            'alt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'hover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'longdesc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }
    complete_apps = ['cmsplugin_hoverimage']
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: default to the project settings module
    # unless the environment already specifies one, then dispatch the
    # management command given on the command line.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mips.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| aniafijarczyk/django-mips | manage.py | Python | mit | 247 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import data_catalog_ptm_create_taxonomy
def test_create_taxonomy(capsys, project_id: str, random_taxonomy_display_name: str):
    """Creating a taxonomy should print the new resource's name."""
    kwargs = {
        "project_id": project_id,
        "location_id": "us",
        "display_name": random_taxonomy_display_name,
    }
    data_catalog_ptm_create_taxonomy.create_taxonomy(**kwargs)
    captured, _ = capsys.readouterr()
    expected = f"Created taxonomy projects/{project_id}/locations/us/taxonomies/"
    assert expected in captured
| googleapis/python-datacatalog | samples/snippets/data_catalog_ptm_create_taxonomy_test.py | Python | apache-2.0 | 992 |
import json
import datetime
from flask import Flask, render_template, request, redirect, url_for, session
from stock_tracer.common import MQClient
app = Flask(__name__)
app.secret_key = '\x14o\x93\xa5\xfe\xc1\xa8\xd2\x1b\xd1H\xd1\xfb=\xfd\x02\xff\x01\xa6 \x14g\xebr'
@app.route("/")
def home():
request_body = {}
request_body['action'] = 'list_quotes'
request_body['payload'] = {'days': 20}
client = MQClient()
reply = client.call(request_body)
if 'error' in reply:
return "error!{}".format(str(reply))
stock_quotes = json.loads(reply)
last_update_time = stock_quotes.pop('last_update', None)
if last_update_time:
last_update_time = datetime.datetime.strptime(last_update_time, '%Y-%m-%d %H:%M:%S')
timediff = datetime.datetime.utcnow() - last_update_time
date_header = []
for stock_quote in stock_quotes.itervalues():
for quote in stock_quote['quotes']:
if quote['date'] not in date_header:
date_header.append(quote['date'])
date_header.sort()
stock_rows = []
for key, stock_quote in stock_quotes.iteritems():
stock_row_item = {'id': key}
stock_row_item['exchange'] = stock_quote['exchange']
stock_row_item['symbol'] = stock_quote['symbol']
stock_row_item['change_percentages'] = [0] * len(date_header)
for quote in stock_quote['quotes']:
index = date_header.index(quote['date'])
stock_row_item['change_percentages'][index] = quote['change_percentage']
stock_rows.append(stock_row_item)
return render_template('home.html',
date_header=date_header,
stock_rows=stock_rows,
last_update=int(timediff.total_seconds() / 60) if last_update_time else None,
message=session.pop('message', None))
@app.route('/add', methods=['POST'])
def add_stock():
    """Handle the add-stock form: ask the backend to register the symbol and
    stash a flash-style message in the session either way."""
    exchange = request.form['exchange']
    symbol = request.form['symbol']
    app.logger.info(exchange)
    app.logger.info(symbol)
    request_body = {
        'action': 'add_stock',
        'payload': {'exchange': exchange, 'symbol': symbol},
    }
    app.logger.info(request_body)
    reply = MQClient().call(request_body)
    if 'error' in reply:
        session['message'] = {'type': 'danger',
                              'value': '<strong>Error!</strong> Stock {0}:{1} is not valid. Please double check.'.format(exchange, symbol)}
    else:
        stock = json.loads(reply)
        session['message'] = {'type': 'success',
                              'value': "New Stock {0}:{1} has been added.".format(stock['exchange'], stock['symbol'])}
    return redirect(url_for('home'))
@app.route('/stock/<stock_id>')
def stock_detail(stock_id):
    """Render the 100-day quote history for a single stock."""
    app.logger.info("building view for {}".format(stock_id))
    request_body = {
        'action': 'list_quotes',
        'payload': {'days': 100, 'stock_id': stock_id},
    }
    reply = MQClient().call(request_body)
    if 'error' in reply:
        return "error!{}".format(str(reply))
    details = json.loads(reply)
    return render_template('stock_detail.html', stock=details[stock_id])
@app.route('/refresh_quotes', methods=['post'])
def refresh_quotes():
    """Ask the backend to refresh all quotes, then return to the home page."""
    reply = MQClient().call({'action': 'refresh_quotes'})
    if 'error' in reply:
        return reply
    return redirect(url_for('home'))
if __name__ == "__main__":
app.run()
| ning-yang/stock_tracer | UI/home.py | Python | gpl-3.0 | 3,670 |
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_regressor
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
    """Base class for AdaBoost estimators.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 estimator_params=tuple(),
                 learning_rate=1.,
                 random_state=None):
        super(BaseWeightBoosting, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)
        self.learning_rate = learning_rate
        self.random_state = random_state
    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier/regressor from the training set (X, y).
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
            forced to DTYPE from tree._tree if the base classifier of this
            ensemble weighted boosting classifier is a tree or forest.
        y : array-like of shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.
        Returns
        -------
        self : object
            Returns self.
        """
        # Check parameters
        if self.learning_rate <= 0:
            raise ValueError("learning_rate must be greater than zero")
        # Trees/forests get the fast C-friendly DTYPE + CSC path; every other
        # base estimator keeps its native dtype.
        if (self.base_estimator is None or
                isinstance(self.base_estimator, (BaseDecisionTree,
                                                 BaseForest))):
            dtype = DTYPE
            accept_sparse = 'csc'
        else:
            dtype = None
            accept_sparse = ['csr', 'csc']
        X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype,
                         y_numeric=is_regressor(self))
        if sample_weight is None:
            # Initialize weights to 1 / n_samples
            sample_weight = np.empty(X.shape[0], dtype=np.float64)
            sample_weight[:] = 1. / X.shape[0]
        else:
            # Normalize existing weights
            sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
            # Check that the sample weights sum is positive
            if sample_weight.sum() <= 0:
                raise ValueError(
                    "Attempting to fit with a non-positive "
                    "weighted number of samples.")
        # Check parameters
        self._validate_estimator()
        # Clear any previous fit results
        self.estimators_ = []
        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
        self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
        for iboost in range(self.n_estimators):
            # Boosting step
            sample_weight, estimator_weight, estimator_error = self._boost(
                iboost,
                X, y,
                sample_weight)
            # Early termination: _boost signals failure/convergence by
            # returning sample_weight=None.
            if sample_weight is None:
                break
            self.estimator_weights_[iboost] = estimator_weight
            self.estimator_errors_[iboost] = estimator_error
            # Stop if error is zero
            if estimator_error == 0:
                break
            sample_weight_sum = np.sum(sample_weight)
            # Stop if the sum of sample weights has become non-positive
            if sample_weight_sum <= 0:
                break
            if iboost < self.n_estimators - 1:
                # Normalize
                sample_weight /= sample_weight_sum
        return self
    @abstractmethod
    def _boost(self, iboost, X, y, sample_weight):
        """Implement a single boost.
        Warning: This method needs to be overriden by subclasses.
        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.
        y : array-like of shape = [n_samples]
            The target values (class labels).
        sample_weight : array-like of shape = [n_samples]
            The current sample weights.
        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.
        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.
        error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        pass
    def staged_score(self, X, y, sample_weight=None):
        """Return staged scores for X, y.
        This generator method yields the ensemble score after each iteration of
        boosting and therefore allows monitoring, such as to determine the
        score on a test set after each boost.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.
        y : array-like, shape = [n_samples]
            Labels for X.
        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.
        Returns
        -------
        z : float
        """
        # Accuracy for classifiers, R^2 for regressors.
        for y_pred in self.staged_predict(X):
            if isinstance(self, ClassifierMixin):
                yield accuracy_score(y, y_pred, sample_weight=sample_weight)
            else:
                yield r2_score(y, y_pred, sample_weight=sample_weight)
    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
        feature).
        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")
        # Weighted average of the per-estimator importances, normalised by
        # the total estimator weight.
        try:
            norm = self.estimator_weights_.sum()
            return (sum(weight * clf.feature_importances_ for weight, clf
                    in zip(self.estimator_weights_, self.estimators_))
                    / norm)
        except AttributeError:
            raise AttributeError(
                "Unable to compute feature importances "
                "since base_estimator does not have a "
                "feature_importances_ attribute")
    def _validate_X_predict(self, X):
        """Ensure that X is in the proper format"""
        # Mirrors the dtype/sparse-format policy used in fit().
        if (self.base_estimator is None or
                isinstance(self.base_estimator,
                           (BaseDecisionTree, BaseForest))):
            X = check_array(X, accept_sparse='csr', dtype=DTYPE)
        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
        return X
def _samme_proba(estimator, n_classes, X):
    """Calculate algorithm 4, step 2, equation c) of Zhu et al [1].

    Returns the SAMME.R per-class decision values:
    (n_classes - 1) * (log p_k - mean over classes of log p), one row per
    sample. Note: the array returned by predict_proba is modified in place
    (safe, as predict_proba returns a fresh array).
    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    proba = estimator.predict_proba(X)
    # Displace zero probabilities so the log is defined.
    # Also fix negative elements which may occur with
    # negative sample weights.
    proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
    log_proba = np.log(proba)
    # Centre each row's log-probabilities about their mean, scaled by K-1.
    return (n_classes - 1) * (log_proba - (1. / n_classes)
                              * log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
    def _boost_real(self, iboost, X, y, sample_weight):
        """Implement a single boost using the SAMME.R real algorithm.

        Fits one weak learner on the weighted data, then updates the sample
        weights in place from the learner's class-probability estimates.
        Returns ``(sample_weight, estimator_weight, estimator_error)``; the
        estimator weight is always 1. for SAMME.R.
        """
        estimator = self._make_estimator()
        # Seed the weak learner when it accepts a random_state parameter;
        # estimators without one raise ValueError, which is ignored.
        try:
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass
        estimator.fit(X, y, sample_weight=sample_weight)
        y_predict_proba = estimator.predict_proba(X)
        # The first round fixes the label set used by all later rounds.
        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)
        # Hard predictions (most probable class) for the error estimate.
        y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
                                       axis=0)
        # Instances incorrectly classified
        incorrect = y_predict != y
        # Weighted error fraction of this round's learner.
        estimator_error = np.mean(
            np.average(incorrect, weights=sample_weight, axis=0))
        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1., 0.
        # Construct y coding as described in Zhu et al [2]:
        #
        #    y_k = 1 if c == k else -1 / (K - 1)
        #
        # where K == n_classes_ and c, k in [0, K) are indices along the second
        # axis of the y coding with c being the index corresponding to the true
        # class label.
        n_classes = self.n_classes_
        classes = self.classes_
        y_codes = np.array([-1. / (n_classes - 1), 1.])
        y_coding = y_codes.take(classes == y[:, np.newaxis])
        # Displace zero probabilities so the log is defined.
        # Also fix negative elements which may occur with
        # negative sample weights.  Note: ``proba`` aliases
        # ``y_predict_proba``, so the clipping below also affects the
        # np.log(y_predict_proba) term in the weight formula.
        proba = y_predict_proba  # alias for readability
        proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
        # Boost weight using multi-class AdaBoost SAMME.R alg
        estimator_weight = (-1. * self.learning_rate
                                * (((n_classes - 1.) / n_classes) *
                                   inner1d(y_coding, np.log(y_predict_proba))))
        # Only boost the weights if it will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))
        return sample_weight, 1., estimator_error
    def _boost_discrete(self, iboost, X, y, sample_weight):
        """Implement a single boost using the SAMME discrete algorithm.

        Fits one weak learner on the weighted data and reweights samples in
        place from its hard predictions.  Returns
        ``(sample_weight, estimator_weight, estimator_error)``; all three are
        None if the learner is no better than random and boosting stops.
        """
        estimator = self._make_estimator()
        # Seed the weak learner when it accepts a random_state parameter;
        # estimators without one raise ValueError, which is ignored.
        try:
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass
        estimator.fit(X, y, sample_weight=sample_weight)
        y_predict = estimator.predict(X)
        # The first round fixes the label set used by all later rounds.
        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)
        # Instances incorrectly classified
        incorrect = y_predict != y
        # Weighted error fraction of this round's learner.
        estimator_error = np.mean(
            np.average(incorrect, weights=sample_weight, axis=0))
        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1., 0.
        n_classes = self.n_classes_
        # Stop if the error is at least as bad as random guessing
        # (1 - 1/K for K classes); the useless estimator is discarded.
        if estimator_error >= 1. - (1. / n_classes):
            self.estimators_.pop(-1)
            if len(self.estimators_) == 0:
                raise ValueError('BaseClassifier in AdaBoostClassifier '
                                 'ensemble is worse than random, ensemble '
                                 'can not be fit.')
            return None, None, None
        # Boost weight using multi-class AdaBoost SAMME alg
        estimator_weight = self.learning_rate * (
            np.log((1. - estimator_error) / estimator_error) +
            np.log(n_classes - 1.))
        # Only boost the weights if I will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight * incorrect *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))
        return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)
        n_classes = self.n_classes_
        # Column vector of labels, broadcast against per-estimator
        # predictions in the SAMME branch below.
        classes = self.classes_[:, np.newaxis]
        pred = None
        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            pred = sum(_samme_proba(estimator, n_classes, X)
                       for estimator in self.estimators_)
        else:   # self.algorithm == "SAMME"
            # Each estimator votes for its predicted class, scaled by its
            # estimator weight.
            pred = sum((estimator.predict(X) == classes).T * w
                       for estimator, w in zip(self.estimators_,
                                               self.estimator_weights_))
        pred /= self.estimator_weights_.sum()
        if n_classes == 2:
            # Collapse the two columns into one signed score: negative means
            # the first class, positive the second.
            pred[:, 0] *= -1
            return pred.sum(axis=1)
        return pred
    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each boosting iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each boosting iteration.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)
        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None
        # Running sum of estimator weights, used to normalize each stage.
        norm = 0.
        for weight, estimator in zip(self.estimator_weights_,
                                     self.estimators_):
            norm += weight
            if self.algorithm == 'SAMME.R':
                # The weights are all 1. for SAMME.R
                current_pred = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_pred = estimator.predict(X)
                current_pred = (current_pred == classes).T * weight
            if pred is None:
                pred = current_pred
            else:
                pred += current_pred
            if n_classes == 2:
                # Work on a copy so the accumulated ``pred`` is not altered
                # by the binary sign flip.
                tmp_pred = np.copy(pred)
                tmp_pred[:, 0] *= -1
                yield (tmp_pred / norm).sum(axis=1)
            else:
                yield pred / norm
    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        check_is_fitted(self, "n_classes_")
        n_classes = self.n_classes_
        X = self._validate_X_predict(X)
        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            proba = sum(_samme_proba(estimator, n_classes, X)
                        for estimator in self.estimators_)
        else:   # self.algorithm == "SAMME"
            proba = sum(estimator.predict_proba(X) * w
                        for estimator, w in zip(self.estimators_,
                                                self.estimator_weights_))
        proba /= self.estimator_weights_.sum()
        # Map the additive scores back to the probability simplex and
        # renormalize rows; all-zero rows are guarded against division by 0.
        proba = np.exp((1. / (n_classes - 1)) * proba)
        normalizer = proba.sum(axis=1)[:, np.newaxis]
        normalizer[normalizer == 0.0] = 1.0
        proba /= normalizer
        return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : generator of array, shape = [n_samples]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
    """An AdaBoost regressor.

    An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
    regressor on the original dataset and then fits additional copies of the
    regressor on the same dataset but where the weights of instances are
    adjusted according to the error of the current prediction. As such,
    subsequent regressors focus more on difficult cases.

    This class implements the algorithm known as AdaBoost.R2 [2].

    Read more in the :ref:`User Guide <adaboost>`.

    Parameters
    ----------
    base_estimator : object, optional (default=DecisionTreeRegressor)
        The base estimator from which the boosted ensemble is built.
        Support for sample weighting is required.
    n_estimators : integer, optional (default=50)
        The maximum number of estimators at which boosting is terminated.
        In case of perfect fit, the learning procedure is stopped early.
    learning_rate : float, optional (default=1.)
        Learning rate shrinks the contribution of each regressor by
        ``learning_rate``. There is a trade-off between ``learning_rate`` and
        ``n_estimators``.
    loss : {'linear', 'square', 'exponential'}, optional (default='linear')
        The loss function to use when updating the weights after each
        boosting iteration.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    estimators_ : list of regressors
        The collection of fitted sub-estimators.
    estimator_weights_ : array of floats
        Weights for each estimator in the boosted ensemble.
    estimator_errors_ : array of floats
        Regression error for each estimator in the boosted ensemble.
    feature_importances_ : array of shape = [n_features]
        The feature importances if supported by the ``base_estimator``.

    See also
    --------
    AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor

    References
    ----------
    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           on-Line Learning and an Application to Boosting", 1995.
    .. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
    """
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 learning_rate=1.,
                 loss='linear',
                 random_state=None):
        super(AdaBoostRegressor, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state)
        self.loss = loss
        # NOTE: ``random_state`` is stored by the base-class constructor
        # above (AdaBoostClassifier.__init__ relies on the same behavior);
        # the redundant re-assignment that used to follow here was removed.

    def fit(self, X, y, sample_weight=None):
        """Build a boosted regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.
        y : array-like of shape = [n_samples]
            The target values (real numbers).
        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.

        Returns
        -------
        self : object
            Returns self.
        """
        # Check loss
        if self.loss not in ('linear', 'square', 'exponential'):
            raise ValueError(
                "loss must be 'linear', 'square', or 'exponential'")
        # Fit
        return super(AdaBoostRegressor, self).fit(X, y, sample_weight)

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super(AdaBoostRegressor, self)._validate_estimator(
            default=DecisionTreeRegressor(max_depth=3))

    def _boost(self, iboost, X, y, sample_weight):
        """Implement a single boost for regression

        Perform a single boost according to the AdaBoost.R2 algorithm and
        return the updated sample weights.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.
        y : array-like of shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like of shape = [n_samples]
            The current sample weights.

        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.
        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.
        estimator_error : float
            The regression error for the current boost.
            If None then boosting has terminated early.
        """
        estimator = self._make_estimator()
        # Seed the weak learner when it accepts a random_state parameter;
        # estimators without one raise ValueError, which is ignored.
        try:
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass
        generator = check_random_state(self.random_state)
        # Weighted sampling of the training set with replacement
        # For NumPy >= 1.7.0 use np.random.choice
        cdf = sample_weight.cumsum()
        cdf /= cdf[-1]
        uniform_samples = generator.random_sample(X.shape[0])
        bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
        # searchsorted returns a scalar
        bootstrap_idx = np.array(bootstrap_idx, copy=False)
        # Fit on the bootstrapped sample and obtain a prediction
        # for all samples in the training set
        estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
        y_predict = estimator.predict(X)
        # Normalized absolute errors in [0, 1], then shaped by the chosen
        # loss function (AdaBoost.R2).
        error_vect = np.abs(y_predict - y)
        error_max = error_vect.max()
        if error_max != 0.:
            error_vect /= error_max
        if self.loss == 'square':
            error_vect **= 2
        elif self.loss == 'exponential':
            error_vect = 1. - np.exp(- error_vect)
        # Calculate the average loss
        estimator_error = (sample_weight * error_vect).sum()
        if estimator_error <= 0:
            # Stop if fit is perfect
            return sample_weight, 1., 0.
        elif estimator_error >= 0.5:
            # Discard current estimator only if it isn't the only one
            if len(self.estimators_) > 1:
                self.estimators_.pop(-1)
            return None, None, None
        beta = estimator_error / (1. - estimator_error)
        # Boost weight using AdaBoost.R2 alg
        estimator_weight = self.learning_rate * np.log(1. / beta)
        # Skip the reweighting on the final iteration - nothing will be
        # fitted afterwards.
        if not iboost == self.n_estimators - 1:
            sample_weight *= np.power(
                beta,
                (1. - error_vect) * self.learning_rate)
        return sample_weight, estimator_weight, estimator_error

    def _get_median_predict(self, X, limit):
        """Weighted-median prediction over the first ``limit`` estimators."""
        # Evaluate predictions of all estimators
        predictions = np.array([
            est.predict(X) for est in self.estimators_[:limit]]).T
        # Sort the predictions
        sorted_idx = np.argsort(predictions, axis=1)
        # Find index of median prediction for each sample
        weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
        median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
        median_idx = median_or_above.argmax(axis=1)
        median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
        # Return median predictions
        return predictions[np.arange(X.shape[0]), median_estimators]

    def predict(self, X):
        """Predict regression value for X.

        The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted regression values.
        """
        check_is_fitted(self, "estimator_weights_")
        X = self._validate_X_predict(X)
        return self._get_median_predict(X, len(self.estimators_))

    def staged_predict(self, X):
        """Return staged predictions for X.

        The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.

        This generator method yields the ensemble prediction after each
        iteration of boosting and therefore allows monitoring, such as to
        determine the prediction on a test set after each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        y : generator of array, shape = [n_samples]
            The predicted regression values.
        """
        check_is_fitted(self, "estimator_weights_")
        X = self._validate_X_predict(X)
        for i, _ in enumerate(self.estimators_, 1):
            yield self._get_median_predict(X, limit=i)
| jmetzen/scikit-learn | sklearn/ensemble/weight_boosting.py | Python | bsd-3-clause | 40,739 |
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .caffe_train import CaffeTrainTask,CaffeTrainSanityCheckError
from google.protobuf import text_format
from digits.config import config_value
# Must import after importing digit.config
import caffe
import caffe_pb2
def check_positive(desc, stage):
    """Parse the prototxt snippet and expect the sanity check to accept it.

    Any CaffeTrainSanityCheckError raised by the check propagates and
    fails the calling test.
    """
    net = caffe_pb2.NetParameter()
    text_format.Merge(desc, net)
    CaffeTrainTask.net_sanity_check(net, stage)
def check_negative(desc, stage):
    """Parse the prototxt snippet and expect the sanity check to REJECT it.

    The previous implementation merely swallowed CaffeTrainSanityCheckError,
    so it also passed silently when no exception was raised at all - a
    regression that stopped rejecting bad networks would go undetected.
    Now an AssertionError is raised in that case.
    """
    network = caffe_pb2.NetParameter()
    text_format.Merge(desc, network)
    try:
        CaffeTrainTask.net_sanity_check(network, stage)
    except CaffeTrainSanityCheckError:
        return
    raise AssertionError(
        "net_sanity_check unexpectedly accepted an invalid network")
class TestCaffeNetSanityCheck():
    """Exercise CaffeTrainTask.net_sanity_check with small prototxt fixtures.

    Each test method builds a network description as a literal text-format
    string and asserts (via check_positive / check_negative) that the sanity
    check accepts or rejects it for a given phase.
    """
    # positive cases
    def test_std_net_train(self):
        # A standard training net: data layer (TRAIN), hidden layer, loss,
        # and a TEST-only accuracy layer - must pass in the TRAIN phase.
        desc = \
            """
            layer {
                name: "data"
                type: "Data"
                top: "data"
                top: "label"
                include {
                    phase: TRAIN
                }
            }
            layer {
                name: "hidden"
                type: 'InnerProduct'
                bottom: "data"
                top: "output"
            }
            layer {
                name: "loss"
                type: "SoftmaxWithLoss"
                bottom: "output"
                bottom: "label"
                top: "loss"
            }
            layer {
                name: "accuracy"
                type: "Accuracy"
                bottom: "output"
                bottom: "label"
                top: "accuracy"
                include {
                    phase: TEST
                }
            }
            """
        check_positive(desc, caffe_pb2.TRAIN)
    def test_std_net_deploy(self):
        # A minimal deploy net: bare input plus one layer - must pass in TEST.
        desc = \
            """
            input: "data"
            layer {
                name: "hidden"
                type: 'InnerProduct'
                bottom: "data"
                top: "output"
            }
            """
        check_positive(desc, caffe_pb2.TEST)
    def test_ref_label_with_proper_include_directive(self):
        # "label" is only referenced by a TRAIN-included layer, so the TEST
        # phase never sees the dangling reference - must pass.
        desc = \
            """
            input: "data"
            layer {
                name: "hidden"
                type: 'InnerProduct'
                bottom: "data"
                top: "output"
            }
            layer {
                name: "loss"
                type: "SoftmaxWithLoss"
                bottom: "output"
                bottom: "label"
                top: "loss"
                include {
                    phase: TRAIN
                }
            }
            """
        check_positive(desc, caffe_pb2.TEST)
    def test_ref_label_with_proper_exclude_directive(self):
        # Same as above but expressed with an exclude directive on TEST.
        desc = \
            """
            input: "data"
            layer {
                name: "hidden"
                type: 'InnerProduct'
                bottom: "data"
                top: "output"
            }
            layer {
                name: "lossExcludedInTest"
                type: "SoftmaxWithLoss"
                bottom: "output"
                bottom: "label"
                top: "loss"
                exclude {
                    phase: TEST
                }
            }
            """
        check_positive(desc, caffe_pb2.TEST)
    # negative cases
    def test_error_ref_label_in_deploy(self):
        # "label" is referenced unconditionally but never produced in the
        # deploy (TEST) net - must be rejected.
        desc = \
            """
            input: "data"
            layer {
                name: "hidden"
                type: 'InnerProduct'
                bottom: "data"
                top: "output"
            }
            layer {
                name: "loss"
                type: "SoftmaxWithLoss"
                bottom: "output"
                bottom: "label"
                top: "loss"
            }
            """
        check_negative(desc, caffe_pb2.TEST)
    def test_error_ref_unknown_blob(self):
        # "bogusBlob" is never produced by any layer - must be rejected.
        desc = \
            """
            input: "data"
            layer {
                name: "hidden"
                type: 'InnerProduct'
                bottom: "data"
                bottom: "bogusBlob"
                top: "output"
            }
            """
        check_negative(desc, caffe_pb2.TRAIN)
    def test_error_ref_unincluded_blob(self):
        # The loss layer is TRAIN-only and references "label", which no
        # layer produces in either phase - must be rejected in TEST.
        desc = \
            """
            input: "data"
            layer {
                name: "hidden"
                type: 'InnerProduct'
                bottom: "data"
                top: "output"
                include {
                    phase: TRAIN
                }
            }
            layer {
                name: "hidden"
                type: 'InnerProduct2'
                bottom: "data"
                top: "output"
            }
            layer {
                name: "loss"
                type: "SoftmaxWithLoss"
                bottom: "output"
                bottom: "label"
                top: "loss"
                include {
                    phase: TRAIN
                }
            }
            """
        check_negative(desc, caffe_pb2.TEST)
    def test_error_ref_excluded_blob(self):
        # As above, with the dangling "label" reference expressed through an
        # exclude directive - must be rejected in TEST.
        desc = \
            """
            input: "data"
            layer {
                name: "hidden"
                type: 'InnerProduct'
                bottom: "data"
                top: "output"
                include {
                    phase: TRAIN
                }
            }
            layer {
                name: "hidden"
                type: 'InnerProduct2'
                bottom: "data"
                top: "output"
            }
            layer {
                name: "loss"
                type: "SoftmaxWithLoss"
                bottom: "output"
                bottom: "label"
                top: "loss"
                exclude {
                    phase: TEST
                }
            }
            """
        check_negative(desc, caffe_pb2.TEST)
| dongjoon-hyun/DIGITS | digits/model/tasks/test_caffe_sanity_checks.py | Python | bsd-3-clause | 5,932 |
#!/usr/bin/env python
import os, sys, json, copy
import multiprocessing
from HapMixWorkflow import *
class SimParams:
    """Plain attribute container for all simulation-workflow parameters.

    Fields are attached dynamically by get_HapMix_params() and the two
    config-file parsers; the class itself defines nothing.
    """
    pass
def get_HapMix_params():
    """Collect command-line options and configuration-file settings into a
    single SimParams object and return it."""
    opts = get_commandline_options()
    params = SimParams()
    # Mirror the simple command-line options onto the parameter object.
    for attr in ("output_dir", "tmp_dir", "mode", "rand_seed"):
        setattr(params, attr, getattr(opts, attr))
    # A synthetic truth bed is generated when a truth-config file was
    # supplied; otherwise the existing truth file is used as-is.
    params.create_truth_bed = bool(opts.truth_config_file)
    if params.create_truth_bed:
        parse_truth_config_file(opts.truth_config_file, params)
    else:
        params.tum_truth_file = opts.tum_truth_file
    # Simulation configuration
    parse_sim_config_file(opts.sim_config_file, params)
    return params
def parse_truth_config_file(truth_config_file, params):
    """Parse HapMix configuration file for the synthetic tumor truth bed file
    into params object.

    Parameters
    ----------
    truth_config_file : str
        Path to a JSON file naming the four distribution files and the
        truth-file name.
    params : SimParams
        Parameter object, mutated in place; must already have ``output_dir``.
    """
    # Use a context manager so the config-file handle is closed promptly
    # (the original json.load(open(...)) leaked the handle).
    with open(truth_config_file) as config_fh:
        config = json.load(config_fh)
    params.CN_distr_file = config["CN_distr_file"]
    params.MCC_distr_file = config["MCC_distr_file"]
    params.num_distr_file = config["num_distr_file"]
    params.len_distr_file = config["len_distr_file"]
    # The truth bed is written inside the simulation's output directory.
    params.tum_truth_file = os.path.join(params.output_dir, config["truth_file_name"])
def parse_sim_config_file(sim_config_file, params):
    """Parse HapMix configuration file for the simulation parameters into
    params object; exits with status 2 on invalid configuration.

    Fixes over the original: config files are opened via context managers
    (the tree-structure dump previously left an unflushed write handle
    open), and single-argument print statements use the parenthesized form,
    which behaves identically under Python 2 and is valid Python 3.
    """
    print(sim_config_file)
    with open(sim_config_file) as config_fh:
        config = json.load(config_fh)
    config_dir = os.path.dirname(os.path.realpath(sim_config_file))
    # Parameters defining the clonal evolution
    params.purity = float(config["purity"])
    params.num_clones = config["num_clones"]
    if params.num_clones == 1:
        # With a single clone the majority-clone fraction and heterogeneity
        # settings are meaningless; use -1 as the sentinel value.
        params.perc_maj = -1
        params.var_het = -1
    else:
        params.perc_maj = config["perc_majority_clone"]
        params.var_het = config["perc_heterogeneous_variants"]
    params.tree_format = config["tree_format"]
    validate_tree_structure(params.tree_format, params.num_clones)
    if isinstance(params.tree_format, (list, tuple)):
        # An explicit tree is serialized into the temp dir for later steps;
        # the context manager guarantees the JSON is flushed to disk.
        params.tree_structure_file = os.path.join(params.tmp_dir, "tree_structure.json")
        with open(params.tree_structure_file, "w") as tree_fh:
            json.dump(params.tree_format, tree_fh)
    if params.num_clones != 1:
        params.name_maj_clone = config["majority_clone"]
    # "chromosomes" must be either a non-empty list or the keyword "all".
    params.chromosomes = config["chromosomes"]
    if isinstance(params.chromosomes, (list, tuple)):
        if len(params.chromosomes) == 0:
            print("\nchromosomes in config file should either be a comma separated integers or a keyword all\n")
            sys.exit(2)
    elif params.chromosomes.strip() != "all":
        print("\nchromosomes in config file should either be a comma separated integers or a keyword all\n")
        sys.exit(2)
    # Parameters for the simulation workflow
    params.haplotyped_bam_dir = config["haplotyped_bam_dir"]
    params.output_bam_file = config["output_bam_file"]
    params.mutate_sv = config["mutate_sv"]
    if params.mutate_sv:
        # Relative paths in the config are resolved against its directory.
        params.somatic_vcf = os.path.join(config_dir, config["somatic_vcf"])
    else:
        params.somatic_vcf = ""
    params.hg_file = os.path.join(config_dir, config["hg_file"])
    params.bam_depth = config["bam_depth"]
    params.ploidy_depth = config["ploidy_depth"]
    # Fail fast on missing input files/directories.
    if not os.path.exists(params.hg_file):
        print("\nGenome file does not exist\n")
        sys.exit(2)
    if not os.path.exists(params.haplotyped_bam_dir):
        print("\nDirectory for haplotyped bam files does not exist\n")
        sys.exit(2)
    if params.mutate_sv and not os.path.exists(params.somatic_vcf):
        print("\nSomatic mutation VCF file does not exist\n")
        sys.exit(2)
def get_commandline_options():
    """Get user options from command line.

    Builds the tHapMix argument parser, validates the supplied paths and
    external tool dependencies (samtools/bedtools), creates the output and
    temp directories, and returns the parsed options (with ``tmp_dir``
    attached).  Exits with status 2 on any validation failure.
    """
    import argparse
    parser = argparse.ArgumentParser(prog='tHapMix v1.0')
    required_arguments = parser.add_argument_group('Required arguments (-t and -b mutually exclusive!)')
    required_arguments.add_argument("-c", "--sim_config_file", dest="sim_config_file", help="configuration file containing the HapMix simulation parameters")
    # Exactly one of -b (synthesize a truth bed) / -t (reuse an existing one)
    # must be given.
    truth_file_args = required_arguments.add_mutually_exclusive_group(required=True)
    truth_file_args.add_argument("-b", "--truth_config_file", dest="truth_config_file", help="configuration file containing the parameters for creating a synthetic truth bed file\n(new truth file is created)")
    truth_file_args.add_argument("-t", "--tumor_truth_file", dest="tum_truth_file", help="name of the tumor truth file\n(existing truth file is used)")
    required_arguments.add_argument("-o", "--output_dir", dest="output_dir", help="name of the output directory")
    required_arguments.add_argument('-m', '--mode', dest='mode', default='local', choices=['sge', 'local'], help="select run mode (local|sge)")
    parser.add_argument("--seed", dest="rand_seed", help="Seed for random number generation", default = None, type=int)
    options = parser.parse_args()
    # -c is declared in the "required" group but not enforced by argparse,
    # so its presence and existence are checked manually here.
    if not options.sim_config_file:
        print("\nConfiguration file for the HapMix simulation is not specified\n")
        parser.print_help()
        sys.exit(2)
    if not os.path.exists(options.sim_config_file):
        print("\nConfiguration file for the HapMix simulation does not exist\n")
        parser.print_help()
        sys.exit(2)
    if options.truth_config_file and (not os.path.exists(options.truth_config_file)):
        parser.print_help()
        print("\nConfiguration file for the tumor truth bed does not exist\n")
        sys.exit(2)
    if options.tum_truth_file and (not os.path.exists(options.tum_truth_file)):
        parser.print_help()
        print("\nTumor truth bed file does not exist\n")
        sys.exit(2)
    # check for samtools and bedtools dependencies
    if not which("samtools"):
        print("\n samtools package must be installed in the PATH, run sudo apt-get install samtools or alternative!\n")
        sys.exit(2)
    if not which("bedtools"):
        print("\n bedtools package must be installed in the PATH, run sudo apt-get install bedtools or alternative!\n")
        sys.exit(2)
    # Create the output directory and a Tmp subdirectory if missing.
    if not os.path.exists(options.output_dir):
        os.mkdir(options.output_dir)
    options.tmp_dir = os.path.join(options.output_dir, "Tmp")
    if not os.path.exists(options.tmp_dir):
        os.mkdir(options.tmp_dir)
    return options
def main():
    """Entry point: gather parameters, pick a workflow, run it, and always
    record an exit-code file in the output directory.

    Fixes over the original: ``retval`` is initialized before the try block
    (previously an exception from wflow.run() caused a NameError inside the
    finally clause, masking the real error and skipping the exit-code file),
    the exit-code file is written via a context manager, and the
    single-argument print uses the parenthesized form (identical output
    under Python 2, valid Python 3).
    """
    params = get_HapMix_params()
    print(vars(params))
    # A protocol (parameter-sweep) workflow is used when any clone setting is
    # a list of values; otherwise a single simulation workflow runs.
    if isinstance(params.num_clones, list) or isinstance(params.perc_maj, list) or isinstance(params.var_het, list):
        wflow = ProtocolFullWorkflow(params)
    else:
        wflow = SimFullWorkflow(params)
    exitpath = os.path.join(params.output_dir, "hapmix.workflow.exitcode.txt")
    if params.mode == "local":
        nCores = multiprocessing.cpu_count()
    else:
        nCores = 128
    # Non-zero default so an exception escaping wflow.run() still yields a
    # valid (failure) exit-code file.
    retval = 1
    try:
        retval = wflow.run(mode=params.mode,
                           nCores=nCores,
                           dataDirRoot=params.output_dir,
                           isContinue="Auto",
                           isForceContinue=True)
    finally:
        with open(exitpath, "w") as exitfp:
            exitfp.write("%i\n" % (retval))
# Run the workflow only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| Illumina/HapMix | bin/submitHapMix.py | Python | gpl-3.0 | 7,446 |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# Connect to the local tradercoind JSON-RPC server on port 6452, embedding
# HTTP basic-auth credentials in the URL only when a password is configured.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:6452")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:6452")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a TraderCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a TraderCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| ikarutoko/Tradercoin | Contrib/Bitrpc/bitrpc.py | Python | mit | 7,842 |
import unittest
from pbxproj.pbxsections.PBXFileReference import PBXFileReference
class PBXFileReferenceTest(unittest.TestCase):
    """Unit tests for PBXFileReference parsing and file-type setters."""

    def testPrintOnSingleLine(self):
        """A parsed file reference renders on one line."""
        parsed = PBXFileReference().parse({"isa": "PBXFileReference", "name": "something"})
        self.assertEqual(repr(parsed), "{isa = PBXFileReference; name = something; }")

    def testSetLastKnownType(self):
        """Setting the last-known type leaves explicitFileType unset."""
        ref = PBXFileReference.create("path")
        ref.set_last_known_file_type('something')
        self.assertEqual(ref.lastKnownFileType, "something")
        self.assertIsNone(ref['explicitFileType'])

    def testSetExplicityFileType(self):
        """Setting the explicit type leaves lastKnownFileType unset."""
        ref = PBXFileReference.create("path")
        ref.set_explicit_file_type('something')
        self.assertEqual(ref.explicitFileType, "something")
        self.assertIsNone(ref['lastKnownFileType'])

    def testSetLastTypeRemovesExplicit(self):
        """set_last_known_file_type clears a previously set explicit type."""
        ref = PBXFileReference.create("path")
        ref.set_explicit_file_type('something')
        ref.set_last_known_file_type('something')
        self.assertEqual(ref.lastKnownFileType, "something")
        self.assertIsNone(ref['explicitFileType'])

    def testSetExplicitRemovesLastType(self):
        """set_explicit_file_type clears a previously set last-known type."""
        ref = PBXFileReference.create("path")
        ref.set_last_known_file_type('something')
        ref.set_explicit_file_type('something')
        self.assertEqual(ref.explicitFileType, "something")
        self.assertIsNone(ref['lastKnownFileType'])
| dayongxie/mod-pbxproj | tests/pbxsections/TestPBXFileReference.py | Python | bsd-3-clause | 1,491 |
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.Standby import getReasons
from Components.Sources.StaticText import StaticText
from Components.ChoiceList import ChoiceList, ChoiceEntryComponent
from Components.config import config, configfile
from Components.ActionMap import ActionMap
from Components.Console import Console
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.ProgressBar import ProgressBar
from Components.SystemInfo import SystemInfo
from Tools.BoundFunction import boundFunction
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from Tools.Downloader import downloadWithProgress
from Tools.HardwareInfo import HardwareInfo
from Tools.Multiboot import getImagelist, getCurrentImage, getCurrentImageMode, deleteImage, restoreImages
import os
import urllib2
import json
import time
import zipfile
import shutil
import tempfile
from enigma import eEPGCache
def checkimagefiles(files):
    """Return True when *files* holds exactly the two parts of a flashable image.

    A file counts as an image part if it is a kernel blob (contains both
    'kernel' and '.bin' in its name) or one of the known rootfs/kernel
    file names.
    """
    known_names = ('uImage', 'rootfs.bin', 'root_cfe_auto.bin', 'root_cfe_auto.jffs2',
                   'oe_rootfs.bin', 'e2jffs2.img', 'rootfs.tar.bz2', 'rootfs.ubi')
    hits = 0
    for name in files:
        if ('kernel' in name and '.bin' in name) or name in known_names:
            hits += 1
    return hits == 2
class SelectImage(Screen):
    """Screen listing flashable images from the online feed and from
    mounted media, letting the user pick one to flash or delete.

    List entries are (label, payload) tuples where payload is an image
    link/path, "Expander" for a category row, or "Waiter" for the
    placeholder row.
    """

    def __init__(self, session, *args):
        Screen.__init__(self, session)
        self.session = session
        self.jsonlist = {}       # raw feed contents, fetched once
        self.imagesList = {}     # category -> {key: {'link':, 'name':}}
        self.setIndex = 0        # selection index to restore after a rebuild
        self.expanded = []       # category names currently expanded
        self.setTitle(_("Multiboot image selector"))
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText()
        self["key_yellow"] = StaticText()
        self["key_blue"] = StaticText()
        self["description"] = StaticText()
        self["list"] = ChoiceList(list=[ChoiceEntryComponent('', ((_("Retrieving image list - Please wait...")), "Waiter"))])
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "KeyboardInputActions", "MenuActions"],
        {
            "ok": self.keyOk,
            "cancel": boundFunction(self.close, None),
            "red": boundFunction(self.close, None),
            "green": self.keyOk,
            "yellow": self.keyDelete,
            "up": self.keyUp,
            "down": self.keyDown,
            "left": self.keyLeft,
            "right": self.keyRight,
            "upRepeated": self.keyUp,
            "downRepeated": self.keyDown,
            "leftRepeated": self.keyLeft,
            "rightRepeated": self.keyRight,
            "menu": boundFunction(self.close, True),
        }, -1)
        self.callLater(self.getImagesList)

    def getImagesList(self):
        """(Re)build the choice list from the JSON feed plus local media."""

        def getImages(path, files):
            # Register zips for this box model that contain a complete image.
            for file in [x for x in files if os.path.splitext(x)[1] == ".zip" and model in x]:
                try:
                    if checkimagefiles([x.split(os.sep)[-1] for x in zipfile.ZipFile(file).namelist()]):
                        if "Downloaded Images" not in self.imagesList:
                            self.imagesList["Downloaded Images"] = {}
                        self.imagesList["Downloaded Images"][file] = {'link': file, 'name': file.split(os.sep)[-1]}
                except:
                    pass

        model = HardwareInfo().get_machine_name()
        if not self.imagesList:
            if not self.jsonlist:
                try:
                    # Fixed: the URL format string was missing its '%s'
                    # placeholder, so this raised TypeError and the online
                    # feed was silently skipped.
                    self.jsonlist = dict(json.load(urllib2.urlopen('https://www.nonsolosat.net/%s' % model)))
                    if config.usage.alternative_imagefeed.value:
                        self.jsonlist.update(dict(json.load(urllib2.urlopen('%s%s' % (config.usage.alternative_imagefeed.value, model)))))
                except:
                    pass
            self.imagesList = dict(self.jsonlist)
            for media in ['/media/%s' % x for x in os.listdir('/media')] + (['/media/net/%s' % x for x in os.listdir('/media/net')] if os.path.isdir('/media/net') else []):
                # Skip internal mmc media on boxes that have one.
                if not(SystemInfo['HasMMC'] and "/mmc" in media) and os.path.isdir(media):
                    try:
                        getImages(media, [os.path.join(media, x) for x in os.listdir(media) if os.path.splitext(x)[1] == ".zip" and model in x])
                        if "downloaded_images" in os.listdir(media):
                            media = os.path.join(media, "downloaded_images")
                            if os.path.isdir(media) and not os.path.islink(media) and not os.path.ismount(media):
                                getImages(media, [os.path.join(media, x) for x in os.listdir(media) if os.path.splitext(x)[1] == ".zip" and model in x])
                                # Clean up stale unzip directories.
                                for dir in [dir for dir in [os.path.join(media, dir) for dir in os.listdir(media)] if os.path.isdir(dir) and os.path.splitext(dir)[1] == ".unzipped"]:
                                    shutil.rmtree(dir)
                    except:
                        pass
        list = []
        for catagorie in reversed(sorted(self.imagesList.keys())):
            if catagorie in self.expanded:
                list.append(ChoiceEntryComponent('expanded', ((str(catagorie)), "Expander")))
                for image in reversed(sorted(self.imagesList[catagorie].keys())):
                    list.append(ChoiceEntryComponent('verticalline', ((str(self.imagesList[catagorie][image]['name'])), str(self.imagesList[catagorie][image]['link']))))
            else:
                # Collapsed category: a single expandable row.
                for image in self.imagesList[catagorie].keys():
                    list.append(ChoiceEntryComponent('expandable', ((str(catagorie)), "Expander")))
                    break
        if list:
            self["list"].setList(list)
            if self.setIndex:
                # Restore the previous selection, stepping off category rows.
                self["list"].moveToIndex(self.setIndex if self.setIndex < len(list) else len(list) - 1)
                if self["list"].l.getCurrentSelection()[0][1] == "Expander":
                    self.setIndex -= 1
                    if self.setIndex:
                        self["list"].moveToIndex(self.setIndex if self.setIndex < len(list) else len(list) - 1)
                self.setIndex = 0
            self.selectionChanged()
        else:
            self.session.openWithCallback(self.close, MessageBox, _("Cannot find images - please try later"), type=MessageBox.TYPE_ERROR, timeout=3)

    def keyOk(self):
        """Toggle a category, or start flashing the selected image."""
        currentSelected = self["list"].l.getCurrentSelection()
        if currentSelected[0][1] == "Expander":
            if currentSelected[0][0] in self.expanded:
                self.expanded.remove(currentSelected[0][0])
            else:
                self.expanded.append(currentSelected[0][0])
            self.getImagesList()
        elif currentSelected[0][1] != "Waiter":
            self.session.openWithCallback(self.getImagesList, FlashImage, currentSelected[0][0], currentSelected[0][1])

    def keyDelete(self):
        """Delete a locally downloaded image (and its unzip directory)."""
        currentSelected = self["list"].l.getCurrentSelection()[0][1]
        if not("://" in currentSelected or currentSelected in ["Expander", "Waiter"]):
            try:
                os.remove(currentSelected)
                currentSelected = ".".join([currentSelected[:-4], "unzipped"])
                if os.path.isdir(currentSelected):
                    shutil.rmtree(currentSelected)
                self.setIndex = self["list"].getSelectedIndex()
                self.imagesList = []
                self.getImagesList()
            except:
                self.session.open(MessageBox, _("Cannot delete downloaded image"), MessageBox.TYPE_ERROR, timeout=3)

    def selectionChanged(self):
        """Update button labels and description for the current row."""
        currentSelected = self["list"].l.getCurrentSelection()
        if "://" in currentSelected[0][1] or currentSelected[0][1] in ["Expander", "Waiter"]:
            self["key_yellow"].setText("")
        else:
            self["key_yellow"].setText(_("Delete image"))
        if currentSelected[0][1] == "Waiter":
            self["key_green"].setText("")
        else:
            if currentSelected[0][1] == "Expander":
                self["key_green"].setText(_("Compress") if currentSelected[0][0] in self.expanded else _("Expand"))
                self["description"].setText("")
            else:
                self["key_green"].setText(_("Flash Image"))
                self["description"].setText(currentSelected[0][1])

    def keyLeft(self):
        self["list"].instance.moveSelection(self["list"].instance.pageUp)
        self.selectionChanged()

    def keyRight(self):
        self["list"].instance.moveSelection(self["list"].instance.pageDown)
        self.selectionChanged()

    def keyUp(self):
        self["list"].instance.moveSelection(self["list"].instance.moveUp)
        self.selectionChanged()

    def keyDown(self):
        self["list"].instance.moveSelection(self["list"].instance.moveDown)
        self.selectionChanged()
class FlashImage(Screen):
    """Screen driving the flash process: optional settings backup,
    download, unzip, and writing the image with ofgwrite."""

    skin = """<screen position="center,center" size="640,150" flags="wfNoBorder" backgroundColor="#54242424">
    <widget name="header" position="5,10" size="e-10,50" font="Regular;40" backgroundColor="#54242424"/>
    <widget name="info" position="5,60" size="e-10,130" font="Regular;24" backgroundColor="#54242424"/>
    <widget name="progress" position="5,e-39" size="e-10,24" backgroundColor="#54242424"/>
    </screen>"""

    BACKUP_SCRIPT = resolveFilename(SCOPE_PLUGINS, "Extensions/AutoBackup/settings-backup.sh")

    def __init__(self, session, imagename, source):
        Screen.__init__(self, session)
        self.containerbackup = None
        self.containerofgwrite = None
        self.getImageList = None
        self.downloader = None
        self.source = source          # URL or local path of the zipped image
        self.imagename = imagename
        self.reasons = getReasons(session)  # reasons why flashing may be unwise (e.g. recordings)
        self["header"] = Label(_("Backup settings"))
        self["info"] = Label(_("Save settings and EPG data"))
        self["progress"] = ProgressBar()
        self["progress"].setRange((0, 100))
        self["progress"].setValue(0)
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
        {
            "cancel": self.abort,
            "red": self.abort,
            "ok": self.ok,
            "green": self.ok,
        }, -1)
        self.callLater(self.confirmation)

    def confirmation(self):
        """Ask the user to confirm flashing, choosing slot and backup mode."""
        if self.reasons:
            self.message = _("%s\nDo you still want to flash image\n%s?") % (self.reasons, self.imagename)
        else:
            self.message = _("Do you want to flash image\n%s") % self.imagename
        if SystemInfo["canMultiBoot"]:
            imagesList = getImagelist()
            currentimageslot = getCurrentImage()
            choices = []
            # Exclude slots living on /dev/sda (USB/hdd) from the offer.
            slotdict = {k: v for k, v in SystemInfo["canMultiBoot"].items() if not v['device'].startswith('/dev/sda')}
            for x in range(1, len(slotdict) + 1):
                choices.append(((_("slot%s - %s (current image) with, backup") if x == currentimageslot else _("slot%s - %s, with backup")) % (x, imagesList[x]['imagename']), (x, "with backup")))
            for x in range(1, len(slotdict) + 1):
                choices.append(((_("slot%s - %s (current image), without backup") if x == currentimageslot else _("slot%s - %s, without backup")) % (x, imagesList[x]['imagename']), (x, "without backup")))
            choices.append((_("No, do not flash image"), False))
            self.session.openWithCallback(self.checkMedia, MessageBox, self.message, list=choices, default=currentimageslot, simple=True)
        else:
            choices = [(_("Yes, with backup"), "with backup"), (_("Yes, without backup"), "without backup"), (_("No, do not flash image"), False)]
            self.session.openWithCallback(self.checkMedia, MessageBox, self.message, list=choices, default=False, simple=True)

    def checkMedia(self, retval):
        """Find storage with enough free space, then start backup/download."""
        if retval:
            if SystemInfo["canMultiBoot"]:
                self.multibootslot = retval[0]
                doBackup = retval[1] == "with backup"
            else:
                doBackup = retval == "with backup"

            def findmedia(path):
                def avail(path):
                    # Free space in MiB, or None if the path is unusable.
                    if not path.startswith('/mmc') and os.path.isdir(path) and os.access(path, os.W_OK):
                        try:
                            statvfs = os.statvfs(path)
                            return (statvfs.f_bavail * statvfs.f_frsize) / (1 << 20)
                        except:
                            pass

                def checkIfDevice(path, diskstats):
                    # True when the path resides on a real sd* block device.
                    st_dev = os.stat(path).st_dev
                    return (os.major(st_dev), os.minor(st_dev)) in diskstats

                diskstats = [(int(x[0]), int(x[1])) for x in [x.split()[0:3] for x in open('/proc/diskstats').readlines()] if x[2].startswith("sd")]
                if os.path.isdir(path) and checkIfDevice(path, diskstats) and avail(path) > 500:
                    return (path, True)
                mounts = []
                devices = []
                for path in ['/media/%s' % x for x in os.listdir('/media')] + (['/media/net/%s' % x for x in os.listdir('/media/net')] if os.path.isdir('/media/net') else []):
                    if checkIfDevice(path, diskstats):
                        devices.append((path, avail(path)))
                    else:
                        mounts.append((path, avail(path)))
                # Prefer the device/mount with the most free space; require >500MB.
                devices.sort(key=lambda x: x[1], reverse=True)
                mounts.sort(key=lambda x: x[1], reverse=True)
                return ((devices[0][1] > 500 and (devices[0][0], True)) if devices else mounts and mounts[0][1] > 500 and (mounts[0][0], False)) or (None, None)

            self.destination, isDevice = findmedia(os.path.isfile(self.BACKUP_SCRIPT) and config.plugins.autobackup.where.value or "/media/hdd")
            if self.destination:
                destination = os.path.join(self.destination, 'downloaded_images')
                self.zippedimage = "://" in self.source and os.path.join(destination, self.imagename) or self.source
                self.unzippedimage = os.path.join(destination, '%s.unzipped' % self.imagename[:-4])
                try:
                    if os.path.isfile(destination):
                        os.remove(destination)
                    if not os.path.isdir(destination):
                        os.mkdir(destination)
                    if doBackup:
                        if isDevice:
                            self.startBackupsettings(True)
                        else:
                            self.session.openWithCallback(self.startBackupsettings, MessageBox, _("Can only find a network drive to store the backup this means after the flash the autorestore will not work. Alternativaly you can mount the network drive after the flash and perform a manufacurer reset to autorestore"), simple=True)
                    else:
                        self.startDownload()
                except:
                    self.session.openWithCallback(self.abort, MessageBox, _("Unable to create the required directories on the media (e.g. USB stick or Harddisk) - Please verify media and try again!"), type=MessageBox.TYPE_ERROR, simple=True)
            else:
                self.session.openWithCallback(self.abort, MessageBox, _("Could not find suitable media - Please remove some downloaded images or insert a media (e.g. USB stick) with sufficiant free space and try again!"), type=MessageBox.TYPE_ERROR, simple=True)
        else:
            self.abort()

    def startBackupsettings(self, retval):
        """Run the AutoBackup script, or ask to continue without it."""
        if retval:
            if os.path.isfile(self.BACKUP_SCRIPT):
                self["info"].setText(_("Backing up to: %s") % self.destination)
                configfile.save()
                if config.plugins.autobackup.epgcache.value:
                    eEPGCache.getInstance().save()
                self.containerbackup = Console()
                self.containerbackup.ePopen("%s%s'%s' %s" % (self.BACKUP_SCRIPT, config.plugins.autobackup.autoinstall.value and " -a " or " ", self.destination, int(config.plugins.autobackup.prevbackup.value)), self.backupsettingsDone)
            else:
                self.session.openWithCallback(self.startDownload, MessageBox, _("Unable to backup settings as the AutoBackup plugin is missing, do you want to continue?"), default=False, simple=True)
        else:
            self.abort()

    def backupsettingsDone(self, data, retval, extra_args):
        self.containerbackup = None
        if retval == 0:
            self.startDownload()
        else:
            # Fixed: this previously interpolated the undefined name 'reval',
            # raising NameError instead of showing the backup error.
            self.session.openWithCallback(self.abort, MessageBox, _("Error during backup settings\n%s") % retval, type=MessageBox.TYPE_ERROR, simple=True)

    def startDownload(self, reply=True):
        """Download the image when the source is a URL, else unzip directly."""
        self.show()
        if reply:
            if "://" in self.source:
                from Tools.Downloader import downloadWithProgress
                self["header"].setText(_("Downloading Image"))
                self["info"].setText(self.imagename)
                self.downloader = downloadWithProgress(self.source, self.zippedimage)
                self.downloader.addProgress(self.downloadProgress)
                self.downloader.addEnd(self.downloadEnd)
                self.downloader.addError(self.downloadError)
                self.downloader.start()
            else:
                self.unzip()
        else:
            self.abort()

    def downloadProgress(self, current, total):
        self["progress"].setValue(int(100 * current / total))

    def downloadError(self, reason, status):
        self.downloader.stop()
        self.session.openWithCallback(self.abort, MessageBox, _("Error during downloading image\n%s\n%s") % (self.imagename, reason), type=MessageBox.TYPE_ERROR, simple=True)

    def downloadEnd(self):
        self.downloader.stop()
        self.unzip()

    def unzip(self):
        self["header"].setText(_("Unzipping Image"))
        self["info"].setText("%s\n%s" % (self.imagename, _("Please wait")))
        self["progress"].hide()
        self.callLater(self.doUnzip)

    def doUnzip(self):
        try:
            zipfile.ZipFile(self.zippedimage, 'r').extractall(self.unzippedimage)
            self.flashimage()
        except:
            self.session.openWithCallback(self.abort, MessageBox, _("Error during unzipping image\n%s") % self.imagename, type=MessageBox.TYPE_ERROR, simple=True)

    def flashimage(self):
        """Locate the unzipped image files and hand them to ofgwrite."""
        self["header"].setText(_("Flashing Image"))

        def findimagefiles(path):
            # The image directory is the first leaf containing a complete image.
            for path, subdirs, files in os.walk(path):
                if not subdirs and files:
                    return checkimagefiles(files) and path

        imagefiles = findimagefiles(self.unzippedimage)
        if imagefiles:
            if SystemInfo["canMultiBoot"]:
                command = "/usr/bin/ofgwrite -k -r -m%s '%s'" % (self.multibootslot, imagefiles)
            else:
                command = "/usr/bin/ofgwrite -k -r '%s'" % imagefiles
            self.containerofgwrite = Console()
            self.containerofgwrite.ePopen(command, self.FlashimageDone)
        else:
            self.session.openWithCallback(self.abort, MessageBox, _("Image to install is invalid\n%s") % self.imagename, type=MessageBox.TYPE_ERROR, simple=True)

    def FlashimageDone(self, data, retval, extra_args):
        self.containerofgwrite = None
        if retval == 0:
            self["header"].setText(_("Flashing image successful"))
            self["info"].setText(_("%s\nPress ok for multiboot selection\nPress exit to close") % self.imagename)
        else:
            self.session.openWithCallback(self.abort, MessageBox, _("Flashing image was not successful\n%s") % self.imagename, type=MessageBox.TYPE_ERROR, simple=True)

    def abort(self, reply=None):
        # Refuse to abort while a listing or a flash write is in progress.
        if self.getImageList or self.containerofgwrite:
            return 0
        if self.downloader:
            self.downloader.stop()
        if self.containerbackup:
            self.containerbackup.killAll()
        self.close()

    def ok(self):
        if self["header"].text == _("Flashing image successful"):
            self.session.openWithCallback(self.abort, MultibootSelection)
        else:
            return 0
class MultibootSelection(SelectImage):
    """Screen to pick which multiboot slot (or Recovery/Android) to boot.

    Mounts the multiboot startup device on a temporary directory, lists
    the installed images, and rewrites the STARTUP file for the chosen
    slot before rebooting.
    """

    def __init__(self, session, *args):
        Screen.__init__(self, session)
        self.skinName = "SelectImage"  # reuse the SelectImage skin
        self.session = session
        self.expanded = []
        self.tmp_dir = None
        self.setTitle(_("Multiboot image selector"))
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("Reboot"))
        self["key_yellow"] = StaticText()
        self["list"] = ChoiceList([])
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions", "KeyboardInputActions", "MenuActions"],
        {
            "ok": self.keyOk,
            "cancel": self.cancel,
            "red": self.cancel,
            "green": self.keyOk,
            "yellow": self.deleteImage,
            "up": self.keyUp,
            "down": self.keyDown,
            "left": self.keyLeft,
            "right": self.keyRight,
            "upRepeated": self.keyUp,
            "downRepeated": self.keyDown,
            "leftRepeated": self.keyLeft,
            "rightRepeated": self.keyRight,
            "menu": boundFunction(self.cancel, True),
        }, -1)
        self.currentimageslot = getCurrentImage()
        # Mount the startup device so STARTUP files can be read/written.
        self.tmp_dir = tempfile.mkdtemp(prefix="MultibootSelection")
        Console().ePopen('mount %s %s' % (SystemInfo["MultibootStartupDevice"], self.tmp_dir))
        self.getImagesList()

    def cancel(self, value=None):
        """Unmount the startup device; value == 2 triggers a reboot."""
        Console().ePopen('umount %s' % self.tmp_dir)
        if not os.path.ismount(self.tmp_dir):
            os.rmdir(self.tmp_dir)
        if value == 2:
            from Screens.Standby import TryQuitMainloop
            self.session.open(TryQuitMainloop, 2)
        else:
            self.close(value)

    def getImagesList(self):
        """Populate the list with bootable slots (and Recovery/Android)."""
        list = []
        imagesList = getImagelist()
        mode = getCurrentImageMode() or 0
        self.deletedImagesExists = False
        if imagesList:
            for index, x in enumerate(sorted(imagesList.keys())):
                if imagesList[x]["imagename"] == _("Deleted image"):
                    self.deletedImagesExists = True
                elif imagesList[x]["imagename"] != _("Empty slot"):
                    if SystemInfo["canMode12"]:
                        # Boxes with mode 12 get two entries per slot.
                        list.insert(index, ChoiceEntryComponent('', ((_("slot%s - %s mode 1 (current image)") if x == self.currentimageslot and mode != 12 else _("slot%s - %s mode 1")) % (x, imagesList[x]['imagename']), (x, 1))))
                        list.append(ChoiceEntryComponent('', ((_("slot%s - %s mode 12 (current image)") if x == self.currentimageslot and mode == 12 else _("slot%s - %s mode 12")) % (x, imagesList[x]['imagename']), (x, 12))))
                    else:
                        list.append(ChoiceEntryComponent('', ((_("slot%s - %s (current image)") if x == self.currentimageslot and mode != 12 else _("slot%s - %s")) % (x, imagesList[x]['imagename']), (x, 1))))
        if os.path.isfile(os.path.join(self.tmp_dir, "STARTUP_RECOVERY")):
            list.append(ChoiceEntryComponent('', ((_("Boot to Recovery menu")), "Recovery")))
        if os.path.isfile(os.path.join(self.tmp_dir, "STARTUP_ANDROID")):
            list.append(ChoiceEntryComponent('', ((_("Boot to Android image")), "Android")))
        if not list:
            list.append(ChoiceEntryComponent('', ((_("No images found")), "Waiter")))
        self["list"].setList(list)
        self.selectionChanged()

    def deleteImage(self):
        # The yellow button doubles as delete/restore depending on state.
        if self["key_yellow"].text == _("Restore deleted images"):
            self.session.openWithCallback(self.deleteImageCallback, MessageBox, _("Are you sure to restore all deleted images"), simple=True)
        elif self["key_yellow"].text == _("Delete Image"):
            self.session.openWithCallback(self.deleteImageCallback, MessageBox, "%s:\n%s" % (_("Are you sure to delete image:"), self.currentSelected[0][0]), simple=True)

    def deleteImageCallback(self, answer):
        if answer:
            if self["key_yellow"].text == _("Restore deleted images"):
                restoreImages()
            else:
                deleteImage(self.currentSelected[0][1][0])
            self.getImagesList()

    def keyOk(self):
        self.session.openWithCallback(self.doReboot, MessageBox, "%s:\n%s" % (_("Are you sure to reboot to"), self.currentSelected[0][0]), simple=True)

    def doReboot(self, answer):
        """Write the STARTUP file for the chosen slot, then reboot."""
        if answer:
            slot = self.currentSelected[0][1]
            if slot == "Recovery":
                shutil.copyfile(os.path.join(self.tmp_dir, "STARTUP_RECOVERY"), os.path.join(self.tmp_dir, "STARTUP"))
            elif slot == "Android":
                shutil.copyfile(os.path.join(self.tmp_dir, "STARTUP_ANDROID"), os.path.join(self.tmp_dir, "STARTUP"))
            elif SystemInfo["canMultiBoot"][slot[0]]['startupfile']:
                # Copy the per-slot startup file (mode-suffixed on mode-12 boxes).
                if SystemInfo["canMode12"]:
                    startupfile = os.path.join(self.tmp_dir, "%s_%s" % (SystemInfo["canMultiBoot"][slot[0]]['startupfile'].rsplit('_', 1)[0], slot[1]))
                else:
                    startupfile = os.path.join(self.tmp_dir, "%s" % SystemInfo["canMultiBoot"][slot[0]]['startupfile'])
                shutil.copyfile(startupfile, os.path.join(self.tmp_dir, "STARTUP"))
            else:
                # No startup file: synthesize the boot command line.
                model = HardwareInfo().get_machine_name()
                if slot[1] == 1:
                    startupFileContents = "boot emmcflash0.kernel%s 'root=/dev/mmcblk0p%s rw rootwait %s_4.boxmode=1'\n" % (slot[0], slot[0] * 2 + 1, model)
                else:
                    startupFileContents = "boot emmcflash0.kernel%s 'brcm_cma=520M@248M brcm_cma=%s@768M root=/dev/mmcblk0p%s rw rootwait %s_4.boxmode=12'\n" % (slot[0], SystemInfo["canMode12"], slot[0] * 2 + 1, model)
                open(os.path.join(self.tmp_dir, "STARTUP"), 'w').write(startupFileContents)
            self.cancel(2)

    def selectionChanged(self):
        self.currentSelected = self["list"].l.getCurrentSelection()
        # Yellow allows deleting any non-current slot, or restoring deletions.
        if type(self.currentSelected[0][1]) is tuple and self.currentimageslot != self.currentSelected[0][1][0]:
            self["key_yellow"].setText(_("Delete Image"))
        elif self.deletedImagesExists:
            self["key_yellow"].setText(_("Restore deleted images"))
        else:
            self["key_yellow"].setText("")
| openNSS/enigma2 | lib/python/Screens/FlashImage.py | Python | gpl-2.0 | 22,428 |
from glanerbeard.web import app
from glanerbeard.default_settings import DEV_LISTEN_HOST, DEV_LISTEN_PORT
# Development entry point: run the Flask app with debug mode enabled on the
# configured development host and port.
if __name__ == '__main__':
    app.debug = True
    app.run(DEV_LISTEN_HOST, DEV_LISTEN_PORT)
| daenney/glanerbeard | dev.py | Python | apache-2.0 | 195 |
#!/usr/bin/env python
# coding: utf-8
# Bubble sort demonstration (Python 2): each outer pass bubbles the largest
# remaining element to position x, printing every comparison along the way.
l = [9,8,7,6,5,4,3,2,1]
for x in range(len(l)-1,0,-1):  # x = index of the last unsorted position
    for y in range(0,x):
        print l[y], l[y+1]  # show the pair about to be compared
        if l[y] > l[y+1]:
            l[y], l[y+1] = l[y+1], l[y]  # swap out-of-order neighbours
    print l  # list state after this pass
    raw_input("")  # pause so each pass can be inspected
| 51reboot/actual_13_homework | 03/peter/paixu.py | Python | mit | 251 |
#!/usr/bin/env python
# This illustrates how to use SoCo plugins
# an example plugin is provided in soco.plugins.example.ExamplePlugin
import time
from soco import SoCo
from soco.plugins import SoCoPlugin
def main():
    """Demonstrate the two ways of obtaining a SoCo plugin instance.

    Discovers Sonos speakers on the network, then loads ExamplePlugin
    first by dotted name (as one would from a config file) and then by
    direct instantiation, exercising a method on each.
    """
    speakers = [speaker.ip_address for speaker in SoCo.discover()]
    if not speakers:
        print("no speakers found, exiting.")
        return
    soco = SoCo(speakers[0])

    # get a plugin by name (eg from a config file)
    myplugin = SoCoPlugin.from_name(
        "soco.plugins.example.ExamplePlugin", soco, "some user"
    )

    # do something with your plugin
    print("Testing", myplugin.name)
    myplugin.music_plugin_play()
    time.sleep(5)

    # create a plugin by normal instantiation
    from soco.plugins.example import ExamplePlugin

    # create a new plugin, pass the soco instance to it
    myplugin = ExamplePlugin(soco, "a user")
    print("Testing", myplugin.name)

    # do something with your plugin
    myplugin.music_plugin_stop()


if __name__ == "__main__":
    main()
| petteraas/SoCo | examples/plugins/socoplugins.py | Python | mit | 1,032 |
"""empty message
Revision ID: 251447ab2060
Revises: 53ac0b4e8891
Create Date: 2018-09-19 09:49:05.552597
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '251447ab2060'
down_revision = '53ac0b4e8891'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add the nullable transactions.lot_number column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('transactions', sa.Column('lot_number', sa.String(length=64), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the ``lot_number`` column again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('transactions', 'lot_number')
    # ### end Alembic commands ###
| gems-uff/labsys | migrations/versions/2018-09-19_09:49:05__251447ab2060.py | Python | mit | 680 |
#
# Copyright (C) 2011 Red Hat, Inc.
#
# Author: Steven Dake <[email protected]>
# Angus Salkeld <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
import os
import logging
import libxml2
import exceptions
import libvirt
from pcloudsh import deployable
class LibVirtDeployable(deployable.Deployable):
def __init__(self, factory, name, username):
self.infrastructure = 'libvirt'
self.username = username
deployable.Deployable.__init__(self, factory, name)
def status(self):
try:
c = libvirt.open("qemu:///system")
except:
self.l.exception('*** couldn\'t connect to libvirt')
name_list = self.factory.doc.xpathEval("/deployables/deployable[@name='%s']/assembly" % self.name)
print ' %-12s %-12s' % ('Assembly', 'Status')
print '------------------------'
for a in name_list:
an = a.prop('name')
try:
ass = c.lookupByName(an)
if ass.isActive():
print " %-12s %-12s" % (an, 'Running')
else:
print " %-12s %-12s" % (an, 'Stopped')
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
print " %-12s %-12s" % (an, 'Undefined')
else:
print " %-12s %-12s" % (an, 'Error')
try:
c.close()
except:
self.l.exception('*** couldn\'t disconnect from libvirt')
| sdake/pacemaker-cloud | src/pcloudsh/libvirt_deployable.py | Python | gpl-2.0 | 2,138 |
"""
WSGI config for mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the settings module unless the environment already set one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")

# Module-level WSGI callable picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| pmutale/landing-page | mysite/wsgi.py | Python | gpl-3.0 | 383 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'flavormanage')
class FlavorManageController(wsgi.Controller):
    """The Flavor Lifecycle API controller for the OpenStack API."""
    _view_builder_class = flavors_view.ViewBuilder

    def __init__(self):
        super(FlavorManageController, self).__init__()

    @staticmethod
    def _required(vals, key, msg):
        """Return vals[key], raising HTTPBadRequest(msg) when it is absent.

        Factors out the validation pattern that was previously duplicated
        for each mandatory creation parameter.  The messages are passed in
        pre-translated so the i18n msgids stay unchanged.
        """
        value = vals.get(key)
        if value is None:
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return value

    @wsgi.action("delete")
    def _delete(self, req, id):
        """Delete the flavor identified by ``id``; 202 on success, 404 if unknown."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            flavor = flavors.get_flavor_by_flavor_id(
                    id, ctxt=context, read_deleted="no")
        except exception.FlavorNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        flavors.destroy(flavor['name'])

        return webob.Response(status_int=202)

    @wsgi.action("create")
    def _create(self, req, body):
        """Create a new flavor from the request body after validating input."""
        context = req.environ['nova.context']
        authorize(context)

        if not self.is_valid_body(body, 'flavor'):
            msg = _("Invalid request body")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        vals = body['flavor']

        # Mandatory parameters; each missing one yields a 400 response.
        name = self._required(vals, 'name',
                              _("A valid name parameter is required"))
        memory = self._required(vals, 'ram',
                                _("A valid ram parameter is required"))
        vcpus = self._required(vals, 'vcpus',
                               _("A valid vcpus parameter is required"))
        root_gb = self._required(vals, 'disk',
                                 _("A valid disk parameter is required"))

        # Optional parameters with their documented defaults.
        flavorid = vals.get('id')
        ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0)
        swap = vals.get('swap', 0)
        rxtx_factor = vals.get('rxtx_factor', 1.0)
        is_public = vals.get('os-flavor-access:is_public', True)

        try:
            flavor = flavors.create(name, memory, vcpus, root_gb,
                                    ephemeral_gb=ephemeral_gb,
                                    flavorid=flavorid, swap=swap,
                                    rxtx_factor=rxtx_factor,
                                    is_public=is_public)
            req.cache_db_flavor(flavor)
        except (exception.FlavorExists,
                exception.FlavorIdExists) as err:
            raise webob.exc.HTTPConflict(explanation=err.format_message())
        except exception.InvalidInput as exc:
            raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
        except exception.FlavorCreateFailed as exc:
            raise webob.exc.HTTPInternalServerError(explanation=
                exc.format_message())

        return self._view_builder.show(req, flavor)
class Flavormanage(extensions.ExtensionDescriptor):
    """Flavor create/delete API support."""

    # Extension metadata consumed by the Nova extension framework.
    name = "FlavorManage"
    alias = "os-flavor-manage"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "flavor_manage/api/v1.1")
    updated = "2012-01-19T00:00:00Z"

    def get_controller_extensions(self):
        """Attach the lifecycle controller to the existing 'flavors' resource."""
        controller = FlavorManageController()
        extension = extensions.ControllerExtension(self, 'flavors', controller)
        return [extension]
| scripnichenko/nova | nova/api/openstack/compute/legacy_v2/contrib/flavormanage.py | Python | apache-2.0 | 4,241 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.param.base_param import BaseParam
from pipeline.param import consts
class EncryptParam(BaseParam):
    """
    Define encryption method that used in federated ml.

    Parameters
    ----------
    method : {'Paillier'}
        If method is 'Paillier', Paillier encryption will be used for federated ml.
        To use non-encryption version in HomoLR, set this to None.
        For detail of Paillier encryption, please check out the paper mentioned in README file.

    key_length : int, default: 1024
        Used to specify the length of key in this encryption method.
    """

    def __init__(self, method=consts.PAILLIER, key_length=1024):
        super(EncryptParam, self).__init__()
        self.method = method          # encryption scheme name, or None for no encryption
        self.key_length = key_length  # key size in bits

    def check(self):
        # No validation is performed: any method/key_length is accepted.
        # NOTE(review): presumably deliberate, since method may legally be
        # None (HomoLR without encryption) -- confirm against BaseParam's
        # check() contract before tightening.
        return True
| FederatedAI/FATE | python/fate_client/pipeline/param/encrypt_param.py | Python | apache-2.0 | 1,481 |
###############################################################################
# Copyright 2016 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "EC-CAS diags" package.
#
# "EC-CAS diags" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "EC-CAS diags" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "EC-CAS diags". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
from .gem import GEM_Data
class ECCAS_Data(GEM_Data):
  """
  EC-CAS model output for forward runs (no assimilation).
  For recent experiments, where tracers are defined w.r.t. dry air.
  """

  # (FSTD nomvar, diagnostic field name, units) triples, appended to the
  # generic GEM field list.
  field_list = GEM_Data.field_list + (
    ('CO2', 'CO2', 'ug(C) kg(dry_air)-1'),
    ('CBB', 'CO2_fire', 'ug(C) kg(dry_air)-1'),
    ('CFF', 'CO2_fossil', 'ug(C) kg(dry_air)-1'),
    ('COC', 'CO2_ocean', 'ug(C) kg(dry_air)-1'),
    ('CLA', 'CO2_bio', 'ug(C) kg(dry_air)-1'),
    ('CO2B', 'CO2_background', 'ug(C) kg(dry_air)-1'),
    ('CH4', 'CH4', 'ug kg(dry_air)-1'),
    ('CH4B', 'CH4_background', 'ug kg(dry_air)-1'),
    ('CHFF', 'CH4_fossil', 'ug kg(dry_air)-1'),
    ('CHBB', 'CH4_bioburn', 'ug kg(dry_air)-1'),
    ('CHOC', 'CH4_ocean', 'ug kg(dry_air)-1'),
    ('CHNA', 'CH4_natural', 'ug kg(dry_air)-1'),
    ('CHAG', 'CH4_agwaste', 'ug kg(dry_air)-1'),
    ('TCO', 'CO', 'ug kg(dry_air)-1'),
    ('OH', 'OH', 'molecules m-3'),
    ('KTN', 'eddy_diffusivity', 'm2 s-1'),
    ('RHO', 'density', 'kg(air) m-3'),
    ('XCO2', 'XCO2', 'ug(C) kg(dry_air)-1'),
    ('XCH4', 'XCH4', 'ug kg(dry_air)-1'),
  )

  # Method to decode an opened dataset (standardize variable names, and add any
  # extra info needed (pressure values, cell area, etc.)
  @classmethod
  def decode (cls,dataset):
    from ..common import conversion_factor
    from .gem import GEM_Data
    # Do generic GEM field decoding
    dataset = GEM_Data.decode.__func__(cls,dataset)
    dataset = list(dataset)
    for i,var in enumerate(dataset):
      # Offset the ocean and land fields by 100ppm
      # (assumes these tracers are carried with a +100 ppm bias in the
      # model output -- TODO confirm against the model configuration)
      varname = var.name
      if varname == 'CO2_ocean':
        var -= conversion_factor('100 ppm', 'ug(C) kg(dry_air)-1', context='CO2')
      if varname == 'CO2_bio':
        var -= conversion_factor('100 ppm', 'ug(C) kg(dry_air)-1', context='CO2')
      # Add species name for all products (to assist in things like unit conversion)
      if varname.startswith('CO2'):
        var.atts['specie'] = 'CO2'
      elif varname.startswith('CH4'):
        var.atts['specie'] = 'CH4'
      elif varname.startswith('CO'):
        var.atts['specie'] = 'CO'
      # Fix any name clobbering from doing math on the fields.
      var.name = varname
      # Write the updated variable back to the list.
      dataset[i] = var
    return dataset

  # Method to re-encode data into the source context
  # (e.g., rename fields to what would be originally in these kinds of files)
  @classmethod
  def encode (cls, dataset):
    from ..common import conversion_factor
    from .gem import GEM_Data
    # Call the generic GEM encoder to convert to the right units and field names
    dataset = GEM_Data.encode.__func__(cls,dataset)
    # Do some extra stuff to offset COC / CLA fields
    # (inverse of the -100 ppm shift applied in decode() above)
    for i, var in enumerate(dataset):
      if var.name in ('COC','CLA'):
        dataset[i] = (var + conversion_factor('100 ppm', 'ug(C) kg(dry_air)-1', context='CO2')).as_type('float32')
    return dataset

  # For our forward cycles, we need to hard-code the ig1/ig2 of the tracers.
  # This is so we match the ip1/ip2 of the wind archive we're injecting
  # into the "analysis" files.
  @staticmethod
  def _fstd_tweak_records (records):
    import numpy as np
    # Select non-coordinate records (things that aren't already using IP2)
    ind = (records['ip2'] == 0)
    # Hard code the ig1 / ig2
    records['ig1'][ind] = 95037
    records['ig2'][ind] = 85819
    records['ig3'][ind] = 1
    # Update the coordinate records to be consistent.
    records['ip1'][~ind] = 95037
    records['ip2'][~ind] = 85819
    records['ip3'][~ind] = 1
    # Just for completion, set the typvar and deet as well.
    records['typvar'][ind] = 'A'
    records['deet'][ind] = 0

  # For our EnKF cycles, we need to hard-code the ig1/ig2 of the tracers.
  # This is so we match the ip1/ip2 of the EnKF initial file we're injecting
  # into.
  # Rename this to _fstd_tweak_records to activate.
  @staticmethod
  def _fstd_tweak_records_enkf (records):
    # Select non-coordinate records (things that aren't already using IP2)
    ind = (records['ip2'] == 0)
    # Hard code the ig1 / ig2
    records['ig1'][ind] = 38992
    records['ig2'][ind] = 45710
    records['ig3'][ind] = 1
    # Update the coordinate records to be consistent.
    records['ip1'][~ind] = 38992
    records['ip2'][~ind] = 45710
    records['ip3'][~ind] = 1
    # Just for completion, set the typvar and deet as well.
    records['typvar'][ind] = 'A'
    records['deet'][ind] = 0
# Add this interface to the table of known model-data interfaces,
# keyed by the name used to select it.
from . import table
table['eccas-dry'] = ECCAS_Data
| neishm/EC-CAS-diags | eccas_diags/interfaces/eccas_dry.py | Python | lgpl-3.0 | 5,553 |
# -*- coding: utf-8 -*-
#
# This file is part of Lumberjack.
# Copyright 2014 CERN.
#
# Lumberjack is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Lumberjack is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lumberjack. If not, see <http://www.gnu.org/licenses/>.
"""Python Elasticsearch Logging Handler."""
from __future__ import absolute_import
from elasticsearch import Elasticsearch
from .handler import ElasticsearchHandler
from .schemas import SchemaManager
from .actions import ActionQueue
from .config import get_default_config
import logging
from copy import deepcopy
LOG = logging.getLogger(__name__)
# TODO: debug mode -- synchronous, no queueing
class Lumberjack(object):
    """Entry point for the lumberjack library.

    Instantiate this once; use it to spawn ``logging`` handlers and to
    register schemas for the loggers you attach them to.  Schemas are
    optional, but without them Elasticsearch stores your data rather
    space-inefficiently.

    Provide either a list of Elasticsearch ``hosts`` or an
    already-instantiated ``Elasticsearch`` object.

    :param hosts: A list of Elasticsearch nodes, e.g.
        ``[{'host': '127.0.0.1', 'port': 9200}]``, passed straight to
        ``elasticsearch.Elasticsearch``.

    :param elasticsearch: Alternatively, a ready-made
        ``elasticsearch.Elasticsearch`` object, perhaps with custom
        transports etc.

    :param config: A configuration for Lumberjack.  See the Configuration
        section in the docs for details.
    """
    def __init__(self, hosts=None, elasticsearch=None, config=None):
        """Set up the ES connection, schema manager and action queue."""
        # TODO: clean this up. Error if both or neither are provided.
        if elasticsearch is None:
            if hosts is None:
                raise TypeError('You must provide either hosts or elasticsearch.')
            LOG.debug('Using provided hosts.')
            elasticsearch = Elasticsearch(hosts=hosts)
        else:
            LOG.debug('Using provided ES instance.')
        self.elasticsearch = elasticsearch

        self.config = get_default_config() if config is None else config

        self.schema_manager = SchemaManager(self.elasticsearch, self.config)

        self.action_queue = ActionQueue(self.elasticsearch, self.config)
        self.action_queue.start()

    def trigger_flush(self):
        """Manually trigger a flush of the log queue.

        :note: Not guaranteed to flush immediately; it merely cuts short
            the wait before the next flush in the ``ActionQueue`` thread.
        """
        self.action_queue.trigger_flush()

    def get_handler(self, suffix_format='%Y.%m'):
        """Spawn a new ``logging.Handler`` bound to this instance's queue.

        :note: Setting this handler's formatter yourself is almost
            definitely unwise -- the integrated formatter prepares
            documents ready for insertion into Elasticsearch.

        :param suffix_format: strftime format appended to index names; the
            default yields e.g. ``generic-logging-2014.09``.
        """
        return ElasticsearchHandler(action_queue=self.action_queue,
                                    suffix_format=suffix_format)

    def register_schema(self, logger, schema):
        """Register a new log entry schema for ``logger``.

        Helps Elasticsearch store the provided data optimally.

        :note: Blocks until the mapping is registered with Elasticsearch,
            so call it during initialisation.

        :param logger: The name of the logger this schema will apply to.
        :param schema: The schema to be used.
        """
        self.schema_manager.register_schema(logger, schema)
| egabancho/lumberjack | lumberjack/api.py | Python | gpl-3.0 | 4,727 |
# Always prefer setuptools over distutils
from os import path
try:
from setuptools import setup
except ImportError:
print("warning: you do not have setuptools installed - cannot use \"develop\" option")
from distutils.core import setup
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), 'r') as f:
    long_description = f.read()

setup(
    name='tinyevolver',
    version='0.2',
    description='A simple, tiny engine for creating genetic algorithms.',
    long_description=long_description,
    # Fix: without this, PyPI renders the markdown README as plain text.
    long_description_content_type='text/markdown',
    # The project's main homepage.
    url='https://github.com/olliemath/Python-TinyEvolver/',
    # Author details
    author='Oliver Margetts',
    author_email='oliver.margetts@gmail.com',
    # Choose your license
    license='GNU GPL V.2',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords=['genetic', 'evolution', 'algorithms', 'optimization'],
    packages=['tinyevolver'],
)
| olliemath/Python-TinyEvolver | setup.py | Python | gpl-2.0 | 1,555 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_replacemsg_alertmail
short_description: Replacement messages in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system_replacemsg feature and alertmail category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_replacemsg_alertmail:
description:
- Replacement messages.
default: null
type: dict
suboptions:
buffer:
description:
- Message string.
type: str
format:
description:
- Format flag.
type: str
choices:
- none
- text
- html
- wml
header:
description:
- Header flag.
type: str
choices:
- none
- http
- 8bit
msg_type:
description:
- Message type.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Replacement messages.
fortios_system_replacemsg_alertmail:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_replacemsg_alertmail:
buffer: "<your_own_value>"
format: "none"
header: "none"
msg_type: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a FortiOS API session using the connection info in *data*."""
    fos.debug('on')

    # HTTPS is on unless the caller explicitly supplied https=False.
    https_disabled = 'https' in data and not data['https']
    fos.https('off' if https_disabled else 'on')

    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_system_replacemsg_alertmail_data(json):
    """Keep only this module's known option keys, dropping absent/None values."""
    option_list = ['buffer', 'format', 'header',
                   'msg_type']

    return dict((key, json[key]) for key in option_list
                if key in json and json[key] is not None)
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys, replacing '_' with '-'.

    Lists are converted in place; dicts are rebuilt; scalars pass through.
    """
    if isinstance(data, list):
        for index, item in enumerate(data):
            data[index] = underscore_to_hyphen(item)
        return data
    if isinstance(data, dict):
        return dict((key.replace('_', '-'), underscore_to_hyphen(value))
                    for key, value in data.items())
    return data
def system_replacemsg_alertmail(data, fos):
    """Create/update (state=present) or delete (state=absent) the alertmail
    replacement message via the FortiOS API, returning the API response."""
    vdom = data['vdom']
    state = data['state']
    system_replacemsg_alertmail_data = data['system_replacemsg_alertmail']
    # Drop unset options and convert key style to the API's hyphenated form.
    filtered_data = underscore_to_hyphen(filter_system_replacemsg_alertmail_data(system_replacemsg_alertmail_data))

    if state == "present":
        return fos.set('system.replacemsg',
                       'alertmail',
                       data=filtered_data,
                       vdom=vdom)

    elif state == "absent":
        # NOTE(review): raises KeyError when msg_type was not supplied,
        # since deletion keys on 'msg-type' -- confirm intended.
        return fos.delete('system.replacemsg',
                          'alertmail',
                          mkey=filtered_data['msg-type'],
                          vdom=vdom)
def is_successful_status(status):
    """True for an explicit success, or for a DELETE that came back 404
    (the object was already gone)."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system_replacemsg(data, fos):
    """Dispatch to the alertmail handler.

    Returns a (is_error, has_changed, response) triple for the Ansible
    exit/fail logic in main().
    """
    # NOTE(review): if 'system_replacemsg_alertmail' is None/empty, 'resp'
    # is unbound and the return below raises NameError -- confirm whether
    # the argument spec guarantees it is always present.
    if data['system_replacemsg_alertmail']:
        resp = system_replacemsg_alertmail(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Ansible module entry point: build the argument spec, pick a transport
    (HTTPAPI connection vs. legacy fortiosapi), run the task and exit."""
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "system_replacemsg_alertmail": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "buffer": {"required": False, "type": "str"},
                "format": {"required": False, "type": "str",
                           "choices": ["none", "text", "html",
                                       "wml"]},
                "header": {"required": False, "type": "str",
                           "choices": ["none", "http", "8bit"]},
                "msg_type": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI transport: requires a persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: connect directly with the fortiosapi package.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
| kustodian/ansible | lib/ansible/modules/network/fortios/fortios_system_replacemsg_alertmail.py | Python | gpl-3.0 | 10,160 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import shutil
import threading
from pyload.manager.Event import AccountUpdateEvent
from pyload.utils import lock
ACC_VERSION = 1
class AccountManager(object):
    """Manages all accounts"""

    def __init__(self, core):
        """Constructor"""
        self.core = core
        self.lock = threading.Lock()

        self.initPlugins()
        self.saveAccounts() #: save to add categories to conf

    def initPlugins(self):
        # Reset the account/plugin caches and reload everything from disk.
        self.accounts = {} #: key = ( plugin )
        self.plugins = {}

        self.initAccountPlugins()
        self.loadAccounts()

    def getAccountPlugin(self, plugin):
        """Get account instance for plugin or None if anonymous"""
        try:
            if plugin in self.accounts:
                if plugin not in self.plugins:
                    klass = self.core.pluginManager.loadClass("accounts", plugin)
                    if klass:
                        self.plugins[plugin] = klass(self, self.accounts[plugin])
                    else: #@NOTE: The account class no longer exists (blacklisted plugin). Skipping the account to avoid crash
                        # bare 'raise' with no active exception itself raises,
                        # which the handler below turns into a None return
                        raise
                return self.plugins[plugin]
            else:
                raise
        except Exception:
            return None

    def getAccountPlugins(self):
        """Get all account instances"""
        plugins = []
        for plugin in self.accounts.keys():
            plugins.append(self.getAccountPlugin(plugin))

        return plugins

    #----------------------------------------------------------------------
    def loadAccounts(self):
        """Loads all accounts available"""
        # accounts.conf layout (after the "version: N" header line):
        #   <plugin>:
        #       <user>:<password>
        #       @<option> <value>...
        try:
            with open("accounts.conf", "a+") as f:
                content = f.readlines()
                version = content[0].split(":")[1].strip() if content else ""

                # Outdated or missing version header: back up and start fresh.
                if not version or int(version) < ACC_VERSION:
                    shutil.copy("accounts.conf", "accounts.backup")
                    f.seek(0)
                    f.write("version: " + str(ACC_VERSION))

                    self.core.log.warning(_("Account settings deleted, due to new config format"))
                    return

        except IOError, e:
            self.core.log.error(str(e))
            return

        plugin = ""
        name = ""

        for line in content[1:]:
            line = line.strip()

            if not line:
                continue
            if line.startswith("#"):
                continue
            if line.startswith("version"):
                continue

            if line.endswith(":") and line.count(":") == 1:
                # Section header: a plugin name.
                plugin = line[:-1]
                self.accounts[plugin] = {}

            elif line.startswith("@"):
                # Option line for the most recently parsed account.
                try:
                    option = line[1:].split()
                    self.accounts[plugin][name]['options'][option[0]] = [] if len(option) < 2 else ([option[1]] if len(option) < 3 else option[1:])
                except Exception:
                    pass

            elif ":" in line:
                # Account line: user:password (password may contain ':').
                name, sep, pw = line.partition(":")
                self.accounts[plugin][name] = {"password": pw, "options": {}, "valid": True}

    #----------------------------------------------------------------------
    def saveAccounts(self):
        """Save all account information"""
        try:
            with open("accounts.conf", "wb") as f:
                f.write("version: " + str(ACC_VERSION) + "\n")

                for plugin, accounts in self.accounts.iteritems():
                    f.write("\n")
                    f.write(plugin + ":\n")

                    for name, data in accounts.iteritems():
                        f.write("\n\t%s:%s\n" % (name, data['password']) )

                        if data['options']:
                            for option, values in data['options'].iteritems():
                                f.write("\t@%s %s\n" % (option, " ".join(values)))

                try:
                    # Restrict to owner read/write: the file holds passwords.
                    os.chmod(f.name, 0600)
                except Exception:
                    pass

        except Exception, e:
            self.core.log.error(str(e))

    #----------------------------------------------------------------------
    def initAccountPlugins(self):
        """Init names"""
        for name in self.core.pluginManager.getAccountPlugins():
            self.accounts[name] = {}

    @lock
    def updateAccount(self, plugin, user, password=None, options={}):
        """Add or update account"""
        # NOTE(review): mutable default 'options={}' -- appears safe because
        # it is only passed through, but confirm updateAccounts never mutates it.
        if plugin in self.accounts:
            p = self.getAccountPlugin(plugin)
            updated = p.updateAccounts(user, password, options)
            # since accounts is a ref in plugin self.accounts doesnt need to be updated here

            self.saveAccounts()
            if updated: p.scheduleRefresh(user, force=False)

    @lock
    def removeAccount(self, plugin, user):
        """Remove account"""
        if plugin in self.accounts:
            p = self.getAccountPlugin(plugin)
            p.removeAccount(user)

            self.saveAccounts()

    @lock
    def getAccountInfos(self, force=True, refresh=False):
        """Collect account info from every plugin; optionally schedule a refresh."""
        data = {}

        if refresh:
            self.core.scheduler.addJob(0, self.core.accountManager.getAccountInfos)
            force = False

        for p in self.accounts.keys():
            if self.accounts[p]:
                # 'p' is rebound from the plugin name to the plugin instance here.
                p = self.getAccountPlugin(p)
                if p:
                    data[p.__class__.__name__] = p.getAllAccounts(force)
                else: #@NOTE: When an account has been skipped, p is None
                    # NOTE(review): this stores under key None (p was rebound),
                    # not under the plugin name -- confirm intended.
                    data[p] = []
            else:
                data[p] = []

        e = AccountUpdateEvent()
        self.core.pullManager.addEvent(e)

        return data

    def sendChange(self):
        """Notify clients (via the pull manager) that account data changed."""
        e = AccountUpdateEvent()
        self.core.pullManager.addEvent(e)
| ardi69/pyload-0.4.10 | pyload/manager/Account.py | Python | gpl-3.0 | 5,902 |
from .buffer import TextModification, Buffer
from .buffer_history import BufferHistory
from .buffer_manipulator import BufferManipulator
from .cursor import Cursor, ModifiedCursor
from .span import Span, Region
from .selection import Selection
| sam-roth/Keypad | keypad/buffers/__init__.py | Python | gpl-3.0 | 247 |
# ----------------------------------------------------------------------------
# pymunk
# Copyright (c) 2007-2011 Victor Blomqvist
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------
"""This module contain the Vec2d class that is used in all of pymunk when a
vector is needed.
The Vec2d class is used almost everywhere in pymunk for 2d coordinates and
vectors, for example to define gravity vector in a space. However, pymunk is
smart enough to convert tuples or tuple like objects to Vec2ds so you usually
do not need to explicitly do conversions if you happen to have a tuple::
>>> import pymunk
>>> space = pymunk.Space()
>>> print space.gravity
Vec2d(0.0, 0.0)
>>> space.gravity = 3,5
>>> print space.gravity
Vec2d(3.0, 5.0)
>>> space.gravity += 2,6
>>> print space.gravity
Vec2d(5.0, 11.0)
"""
__version__ = "$Id: vec2d.py 478 2013-01-21 11:01:39Z [email protected] $"
__docformat__ = "reStructuredText"
import operator
import math
import ctypes
float_type = ctypes.c_double
__all__ = ["Vec2d"]
class Vec2d(ctypes.Structure):
    """2d vector class, supports vector and scalar operators, and also
    provides some high level functions.
    """
    # NOTE: the ctypes ``_fields_`` layout is assigned *after* the class body
    # (at the bottom of this module); presumably so the body can be defined
    # with ``__slots__`` first -- confirm before reordering.
    __slots__ = ['x', 'y']
    @classmethod
    def from_param(cls, arg):
        """Used by ctypes to automatically create Vec2ds"""
        return cls(arg)
    def __init__(self, x_or_pair=None, y = None):
        # Accepts Vec2d(), Vec2d((x, y)) or Vec2d(x, y). With no arguments
        # the components keep whatever ctypes initialized them to.
        if x_or_pair != None:
            if y == None:
                self.x = x_or_pair[0]
                self.y = x_or_pair[1]
            else:
                self.x = x_or_pair
                self.y = y
    def __len__(self):
        # A Vec2d always behaves as a two-element sequence.
        return 2
    def __getitem__(self, key):
        if key == 0:
            return self.x
        elif key == 1:
            return self.y
        else:
            raise IndexError("Invalid subscript "+str(key)+" to Vec2d")
    def __setitem__(self, key, value):
        if key == 0:
            self.x = value
        elif key == 1:
            self.y = value
        else:
            raise IndexError("Invalid subscript "+str(key)+" to Vec2d")
    # String representation (for debugging)
    def __repr__(self):
        return 'Vec2d(%s, %s)' % (self.x, self.y)
    # Comparison: any 2-element indexable compares element-wise.
    def __eq__(self, other):
        if hasattr(other, "__getitem__") and len(other) == 2:
            return self.x == other[0] and self.y == other[1]
        else:
            return False
    def __ne__(self, other):
        if hasattr(other, "__getitem__") and len(other) == 2:
            return self.x != other[0] or self.y != other[1]
        else:
            return True
    def __nonzero__(self):
        # Python 2 truth value: falsy only when both components are zero.
        return self.x != 0.0 or self.y != 0.0
    # Generic operator handlers
    def _o2(self, other, f):
        "Any two-operator operation where the left operand is a Vec2d"
        if isinstance(other, Vec2d):
            return Vec2d(f(self.x, other.x),
                         f(self.y, other.y))
        elif (hasattr(other, "__getitem__")):
            return Vec2d(f(self.x, other[0]),
                         f(self.y, other[1]))
        else:
            return Vec2d(f(self.x, other),
                         f(self.y, other))
    def _r_o2(self, other, f):
        "Any two-operator operation where the right operand is a Vec2d"
        if (hasattr(other, "__getitem__")):
            return Vec2d(f(other[0], self.x),
                         f(other[1], self.y))
        else:
            return Vec2d(f(other, self.x),
                         f(other, self.y))
    def _io(self, other, f):
        "inplace operator"
        if (hasattr(other, "__getitem__")):
            self.x = f(self.x, other[0])
            self.y = f(self.y, other[1])
        else:
            self.x = f(self.x, other)
            self.y = f(self.y, other)
        return self
    # Addition
    def __add__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(self.x + other.x, self.y + other.y)
        elif hasattr(other, "__getitem__"):
            return Vec2d(self.x + other[0], self.y + other[1])
        else:
            return Vec2d(self.x + other, self.y + other)
    __radd__ = __add__
    def __iadd__(self, other):
        if isinstance(other, Vec2d):
            self.x += other.x
            self.y += other.y
        elif hasattr(other, "__getitem__"):
            self.x += other[0]
            self.y += other[1]
        else:
            self.x += other
            self.y += other
        return self
    # Subtraction
    def __sub__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(self.x - other.x, self.y - other.y)
        elif (hasattr(other, "__getitem__")):
            return Vec2d(self.x - other[0], self.y - other[1])
        else:
            return Vec2d(self.x - other, self.y - other)
    def __rsub__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(other.x - self.x, other.y - self.y)
        if (hasattr(other, "__getitem__")):
            return Vec2d(other[0] - self.x, other[1] - self.y)
        else:
            return Vec2d(other - self.x, other - self.y)
    def __isub__(self, other):
        if isinstance(other, Vec2d):
            self.x -= other.x
            self.y -= other.y
        elif (hasattr(other, "__getitem__")):
            self.x -= other[0]
            self.y -= other[1]
        else:
            self.x -= other
            self.y -= other
        return self
    # Multiplication (component-wise for vector operands)
    def __mul__(self, other):
        if isinstance(other, Vec2d):
            return Vec2d(self.x*other.x, self.y*other.y)
        if (hasattr(other, "__getitem__")):
            return Vec2d(self.x*other[0], self.y*other[1])
        else:
            return Vec2d(self.x*other, self.y*other)
    __rmul__ = __mul__
    def __imul__(self, other):
        if isinstance(other, Vec2d):
            self.x *= other.x
            self.y *= other.y
        elif (hasattr(other, "__getitem__")):
            self.x *= other[0]
            self.y *= other[1]
        else:
            self.x *= other
            self.y *= other
        return self
    # Division (``__div__`` / ``operator.div`` are Python 2 only)
    def __div__(self, other):
        return self._o2(other, operator.div)
    def __rdiv__(self, other):
        return self._r_o2(other, operator.div)
    def __idiv__(self, other):
        return self._io(other, operator.div)
    def __floordiv__(self, other):
        return self._o2(other, operator.floordiv)
    def __rfloordiv__(self, other):
        return self._r_o2(other, operator.floordiv)
    def __ifloordiv__(self, other):
        return self._io(other, operator.floordiv)
    def __truediv__(self, other):
        return self._o2(other, operator.truediv)
    def __rtruediv__(self, other):
        return self._r_o2(other, operator.truediv)
    def __itruediv__(self, other):
        return self._io(other, operator.truediv)
    # Modulo
    def __mod__(self, other):
        return self._o2(other, operator.mod)
    def __rmod__(self, other):
        return self._r_o2(other, operator.mod)
    def __divmod__(self, other):
        return self._o2(other, divmod)
    def __rdivmod__(self, other):
        return self._r_o2(other, divmod)
    # Exponentiation
    def __pow__(self, other):
        return self._o2(other, operator.pow)
    def __rpow__(self, other):
        return self._r_o2(other, operator.pow)
    # Bitwise operators
    def __lshift__(self, other):
        return self._o2(other, operator.lshift)
    def __rlshift__(self, other):
        return self._r_o2(other, operator.lshift)
    def __rshift__(self, other):
        return self._o2(other, operator.rshift)
    def __rrshift__(self, other):
        return self._r_o2(other, operator.rshift)
    def __and__(self, other):
        return self._o2(other, operator.and_)
    __rand__ = __and__
    def __or__(self, other):
        return self._o2(other, operator.or_)
    __ror__ = __or__
    def __xor__(self, other):
        return self._o2(other, operator.xor)
    __rxor__ = __xor__
    # Unary operations
    def __neg__(self):
        return Vec2d(operator.neg(self.x), operator.neg(self.y))
    def __pos__(self):
        return Vec2d(operator.pos(self.x), operator.pos(self.y))
    def __abs__(self):
        return Vec2d(abs(self.x), abs(self.y))
    def __invert__(self):
        # ``~v`` is defined as component-wise negation, same as ``-v``.
        return Vec2d(-self.x, -self.y)
    # vectory functions
    def get_length_sqrd(self):
        """Get the squared length of the vector.
        It is more efficent to use this method instead of first call
        get_length() or access .length and then do a sqrt().
        :return: The squared length
        """
        return self.x**2 + self.y**2
    def get_length(self):
        """Get the length of the vector.
        :return: The length
        """
        return math.sqrt(self.x**2 + self.y**2)
    def __setlength(self, value):
        # Scale both components so the magnitude becomes ``value``.
        # NOTE(review): divides by the current length -- a zero vector
        # would raise ZeroDivisionError here.
        length = self.get_length()
        self.x *= value/length
        self.y *= value/length
    length = property(get_length, __setlength,
        doc = """Gets or sets the magnitude of the vector""")
    def rotate(self, angle_radians):
        """Rotate the vector by angle_radians radians."""
        cos = math.cos(angle_radians)
        sin = math.sin(angle_radians)
        x = self.x*cos - self.y*sin
        y = self.x*sin + self.y*cos
        self.x = x
        self.y = y
    def rotated(self, angle_radians):
        """Create and return a new vector by rotating this vector by
        angle_radians radians.
        :return: Rotated vector
        """
        cos = math.cos(angle_radians)
        sin = math.sin(angle_radians)
        x = self.x*cos - self.y*sin
        y = self.x*sin + self.y*cos
        return Vec2d(x, y)
    def rotate_degrees(self, angle_degrees):
        """Rotate the vector by angle_degrees degrees."""
        self.rotate(math.radians(angle_degrees))
    def rotated_degrees(self, angle_degrees):
        """Create and return a new vector by rotating this vector by
        angle_degrees degrees.
        :return: Rotated vector
        """
        return self.rotated(math.radians(angle_degrees))
    def get_angle(self):
        # Angle of the zero vector is defined as 0 to avoid atan2(0, 0).
        if (self.get_length_sqrd() == 0):
            return 0
        return math.atan2(self.y, self.x)
    def __setangle(self, angle):
        # Re-point the vector along ``angle`` while keeping its magnitude.
        self.x = self.length
        self.y = 0
        self.rotate(angle)
    angle = property(get_angle, __setangle,
        doc="""Gets or sets the angle (in radians) of a vector""")
    def get_angle_degrees(self):
        return math.degrees(self.get_angle())
    def __set_angle_degrees(self, angle_degrees):
        self.__setangle(math.radians(angle_degrees))
    angle_degrees = property(get_angle_degrees, __set_angle_degrees,
        doc="""Gets or sets the angle (in degrees) of a vector""")
    def get_angle_between(self, other):
        """Get the angle between the vector and the other in radians
        :return: The angle
        """
        cross = self.x*other[1] - self.y*other[0]
        dot = self.x*other[0] + self.y*other[1]
        return math.atan2(cross, dot)
    def get_angle_degrees_between(self, other):
        """Get the angle between the vector and the other in degrees
        :return: The angle (in degrees)
        """
        return math.degrees(self.get_angle_between(other))
    def normalized(self):
        """Get a normalized copy of the vector
        Note: This function will return 0 if the length of the vector is 0.
        :return: A normalized vector
        """
        length = self.length
        if length != 0:
            return self/length
        return Vec2d(self)
    def normalize_return_length(self):
        """Normalize the vector and return its length before the normalization
        :return: The length before the normalization
        """
        length = self.length
        if length != 0:
            self.x /= length
            self.y /= length
        return length
    def perpendicular(self):
        # 90-degree counter-clockwise rotation.
        return Vec2d(-self.y, self.x)
    def perpendicular_normal(self):
        # Unit-length perpendicular; returns a copy of self for the zero vector.
        length = self.length
        if length != 0:
            return Vec2d(-self.y/length, self.x/length)
        return Vec2d(self)
    def dot(self, other):
        """The dot product between the vector and other vector
        v1.dot(v2) -> v1.x*v2.x + v1.y*v2.y
        :return: The dot product
        """
        return float(self.x*other[0] + self.y*other[1])
    def get_distance(self, other):
        """The distance between the vector and other vector
        :return: The distance
        """
        return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)
    def get_dist_sqrd(self, other):
        """The squared distance between the vector and other vector
        It is more efficent to use this method than to call get_distance()
        first and then do a sqrt() on the result.
        :return: The squared distance
        """
        return (self.x - other[0])**2 + (self.y - other[1])**2
    def projection(self, other):
        # Project self onto ``other``.
        # NOTE(review): a zero-length ``other`` raises ZeroDivisionError.
        other_length_sqrd = other[0]*other[0] + other[1]*other[1]
        projected_length_times_other_length = self.dot(other)
        return other*(projected_length_times_other_length/other_length_sqrd)
    def cross(self, other):
        """The cross product between the vector and other vector
        v1.cross(v2) -> v1.x*v2.y - v1.y*v2.x
        :return: The cross product
        """
        return self.x*other[1] - self.y*other[0]
    def interpolate_to(self, other, range):
        # Linear interpolation: range=0 gives self, range=1 gives other.
        # (``range`` shadows the builtin -- kept for API compatibility.)
        return Vec2d(self.x + (other[0] - self.x)*range, self.y + (other[1] - self.y)*range)
    def convert_to_basis(self, x_vector, y_vector):
        # Express self in the (non-normalized) basis spanned by the two vectors.
        x = self.dot(x_vector)/x_vector.get_length_sqrd()
        y = self.dot(y_vector)/y_vector.get_length_sqrd()
        return Vec2d(x, y)
    def __get_int_xy(self):
        return int(self.x), int(self.y)
    int_tuple = property(__get_int_xy,
        doc="""Return the x and y values of this vector as ints""")
    @staticmethod
    def zero():
        """A vector of zero length"""
        return Vec2d(0, 0)
    @staticmethod
    def unit():
        """A unit vector pointing up"""
        return Vec2d(0, 1)
    @staticmethod
    def ones():
        """A vector where both x and y is 1"""
        return Vec2d(1, 1)
    # Extra functions, mainly for chipmunk
    def cpvrotate(self, other):
        """Uses complex multiplication to rotate this vector by the other. """
        return Vec2d(self.x*other.x - self.y*other.y, self.x*other.y + self.y*other.x)
    def cpvunrotate(self, other):
        """The inverse of cpvrotate"""
        return Vec2d(self.x*other.x + self.y*other.y, self.y*other.x - self.x*other.y)
    # Pickle
    def __reduce__(self):
        # Rebuild via the two-argument constructor when unpickling.
        callable = Vec2d
        args = (self.x, self.y)
        return (callable, args)
# Attach the ctypes field layout after the class body; presumably deferred so
# the class can be declared with ``__slots__`` first -- confirm before moving
# this into the class definition.
Vec2d._fields_ = [
    ('x', float_type),
    ('y', float_type),
]
# float_type (ctypes.c_double) was only needed to build _fields_; drop it
# from the module namespace.
del float_type
| saintdragon2/python-3-lecture-2015 | civil_mid_mid/pymunk-4.0.0/pymunk/vec2d.py | Python | mit | 16,281 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Auto-generated South schema migration: adds Sewer.the_geom_length.
    # Edit with care -- the frozen ``models`` dict below must describe the
    # ORM state *after* this migration and is normally regenerated, not
    # hand-edited.
    def forwards(self, orm):
        # Adding field 'Sewer.the_geom_length'
        db.add_column('lizard_riool_sewer', 'the_geom_length',
                      self.gf('django.db.models.fields.FloatField')(default=0),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Sewer.the_geom_length'
        db.delete_column('lizard_riool_sewer', 'the_geom_length')
    # Frozen snapshot of the app's models at the time of this migration.
    models = {
        'lizard_riool.manhole': {
            'Meta': {'object_name': 'Manhole'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'ground_level': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sewerage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_riool.Sewerage']"}),
            'sink': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'the_geom': ('django.contrib.gis.db.models.fields.PointField', [], {})
        },
        'lizard_riool.put': {
            'Meta': {'object_name': 'Put'},
            '_CAA': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_column': "'caa'"}),
            '_CAB': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '28992', 'db_column': "'cab'"}),
            '_CAR': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'db_column': "'car'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'upload': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_riool.Upload']"})
        },
        'lizard_riool.riool': {
            'Meta': {'object_name': 'Riool'},
            '_AAA': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_column': "'aaa'"}),
            '_AAD': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_column': "'aad'"}),
            '_AAE': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '28992', 'db_column': "'aae'"}),
            '_AAF': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_column': "'aaf'"}),
            '_AAG': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '28992', 'db_column': "'aag'"}),
            '_ACR': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'acr'"}),
            '_ACS': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'acs'"}),
            '_the_geom': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '28992', 'db_column': "'the_geom'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'upload': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_riool.Upload']"})
        },
        'lizard_riool.rioolmeting': {
            'Meta': {'object_name': 'Rioolmeting'},
            'ZYB': ('django.db.models.fields.CharField', [], {'max_length': '1', 'db_column': "'zyb'"}),
            'ZYR': ('django.db.models.fields.CharField', [], {'max_length': '1', 'db_column': "'zyr'"}),
            'ZYS': ('django.db.models.fields.CharField', [], {'max_length': '1', 'db_column': "'zys'"}),
            '_ZYA': ('django.db.models.fields.FloatField', [], {'db_column': "'zya'"}),
            '_ZYE': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_column': "'zye'"}),
            '_ZYT': ('django.db.models.fields.FloatField', [], {'db_column': "'zyt'"}),
            '_ZYU': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_column': "'zyu'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'upload': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_riool.Upload']"})
        },
        'lizard_riool.sewer': {
            'Meta': {'object_name': 'Sewer'},
            'bob1': ('django.db.models.fields.FloatField', [], {}),
            'bob2': ('django.db.models.fields.FloatField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'diameter': ('django.db.models.fields.FloatField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'manhole1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['lizard_riool.Manhole']"}),
            'manhole2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['lizard_riool.Manhole']"}),
            'quality': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'sewerage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_riool.Sewerage']"}),
            'shape': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '1'}),
            'the_geom': ('django.contrib.gis.db.models.fields.LineStringField', [], {}),
            'the_geom_length': ('django.db.models.fields.FloatField', [], {})
        },
        'lizard_riool.sewerage': {
            'Meta': {'object_name': 'Sewerage'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'rib': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
            'rmb': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'})
        },
        'lizard_riool.sewermeasurement': {
            'Meta': {'object_name': 'SewerMeasurement'},
            'bob': ('django.db.models.fields.FloatField', [], {}),
            'dist': ('django.db.models.fields.FloatField', [], {}),
            'flooded_pct': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'obb': ('django.db.models.fields.FloatField', [], {}),
            'sewer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_riool.Sewer']"}),
            'the_geom': ('django.contrib.gis.db.models.fields.PointField', [], {}),
            'virtual': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'water_level': ('django.db.models.fields.FloatField', [], {'null': 'True'})
        },
        'lizard_riool.sinkforupload': {
            'Meta': {'object_name': 'SinkForUpload'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sink': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_riool.Put']"}),
            'upload': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_riool.Upload']", 'unique': 'True'})
        },
        'lizard_riool.storedgraph': {
            'Meta': {'unique_together': "(('rmb', 'x', 'y'),)", 'object_name': 'StoredGraph'},
            'flooded_percentage': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rmb': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_riool.Upload']"}),
            'suf_id': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
            'x': ('django.db.models.fields.FloatField', [], {}),
            'xy': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '28992'}),
            'y': ('django.db.models.fields.FloatField', [], {})
        },
        'lizard_riool.upload': {
            'Meta': {'object_name': 'Upload'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
            'the_file': ('django.db.models.fields.FilePathField', [], {'path': "'/home/byrman/Repo/almere-site/var/lizard_riool/uploads'", 'max_length': '100'}),
            'the_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'lizard_riool.uploadedfileerror': {
            'Meta': {'object_name': 'UploadedFileError'},
            'error_message': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'uploaded_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_riool.Upload']"})
        }
    }
    complete_apps = ['lizard_riool']
from datetime import timedelta
from flask import current_app
from app import performance_platform_client
from app.dao.notifications_dao import (
dao_get_total_notifications_sent_per_day_for_performance_platform,
)
from app.utils import get_london_midnight_in_utc
def send_processing_time_to_performance_platform(bst_date):
    """Report notification processing-time stats for a single BST day.

    The day runs from London midnight of *bst_date* up to (but excluding)
    London midnight of the following day, both expressed in UTC.
    """
    day_start = get_london_midnight_in_utc(bst_date)
    day_end = get_london_midnight_in_utc(bst_date + timedelta(days=1))
    send_processing_time_for_start_and_end(day_start, day_end, bst_date)
def send_processing_time_for_start_and_end(start_time, end_time, bst_date):
    """Fetch processing-time totals for the window and push both stats.

    :param start_time: UTC start of the reporting window
    :param end_time: UTC end (exclusive) of the reporting window
    :param bst_date: the BST date the window represents (unused here,
        kept for interface compatibility)
    """
    result = dao_get_total_notifications_sent_per_day_for_performance_platform(start_time, end_time)
    # Use lazy %-style logger arguments so the message string is only
    # built if the log record is actually emitted.
    current_app.logger.info(
        'Sending processing-time to performance platform for date %s. Total: %s, under 10 secs %s',
        start_time, result.messages_total, result.messages_within_10_secs
    )
    send_processing_time_data(start_time, 'messages-total', result.messages_total)
    send_processing_time_data(start_time, 'messages-within-10-secs', result.messages_within_10_secs)
def send_processing_time_data(start_time, status, count):
    """Format one processing-time statistic and submit it to the
    performance platform."""
    performance_platform_client.send_stats_to_performance_platform(
        performance_platform_client.format_payload(
            dataset='processing-time',
            start_time=start_time,
            group_name='status',
            group_value=status,
            count=count,
        )
    )
| alphagov/notifications-api | app/performance_platform/processing_time.py | Python | mit | 1,472 |
import unittest
import Queue
import time
from walky.worker import *
from _common import *
class MyWorkerRequest(WorkerRequest):
    """Minimal WorkerRequest probe for the tests below.

    ``execute()`` appends a marker to ``self.q`` so a test can verify the
    request was actually run (directly, via a WorkerThread, or via a
    WorkerCrew).
    """
    def __init__(self):
        # NOTE(review): deliberately does not call WorkerRequest.__init__
        # -- assumes the base class needs no initialisation; confirm.
        self.q = []
    def execute(self):
        self.q.append("TEST")
class Test(unittest.TestCase):
    """Exercise the three ways a WorkerRequest can be executed."""
    def test_execute(self):
        # Execute our test object directly (no threading involved)
        worker_obj = MyWorkerRequest()
        self.assertIsInstance(worker_obj,MyWorkerRequest)
        self.assertFalse(worker_obj.q)
        worker_obj.execute()
        self.assertTrue(worker_obj.q)
        # Have our test object executed via queue injection
        queue = Queue.Queue()
        worker_obj = MyWorkerRequest()
        workthread_obj = WorkerThread(queue)
        self.assertIsInstance(workthread_obj,WorkerThread)
        workthread_obj.start()
        queue.put(worker_obj)
        self.assertFalse(worker_obj.q)
        # Timing-sensitive: give the worker thread a moment to pick the
        # request off the queue before asserting it ran.
        time.sleep(0.2)
        self.assertTrue(worker_obj.q)
        workthread_obj.shutdown()
        # Have our test object executed via the exec pool handler
        crew_obj = WorkerCrew()
        self.assertIsInstance(crew_obj,WorkerCrew)
        crew_obj.start()
        worker_obj = MyWorkerRequest()
        self.assertFalse(worker_obj.q)
        crew_obj.put(worker_obj)
        # Same fixed delay as above; may flake on a heavily loaded machine.
        time.sleep(0.2)
        self.assertTrue(worker_obj.q)
        crew_obj.shutdown()
        # Way cool.
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| amimoto/walky | tests/067-workers.py | Python | mit | 1,412 |
from functools import wraps
from flask import request
from flask_login import current_user
from app.api.errors import forbidden, unauthorized
EXEMPT_METHODS = set(['OPTIONS'])
def permission_required(permission):
    """Decorator factory: reject the request with a 403 response unless
    the current user holds *permission*."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if current_user.can(permission):
                return func(*args, **kwargs)
            return forbidden('Insufficient permissions')
        return wrapper
    return decorator
def login_required(func):
    """Require an authenticated user for the wrapped view.

    CORS preflight requests (methods in EXEMPT_METHODS) pass straight
    through; unauthenticated requests get a 401 with a Basic-auth
    challenge header.
    """
    @wraps(func)
    def decorated_view(*args, **kwargs):
        needs_auth = request.method not in EXEMPT_METHODS
        if needs_auth and not current_user.is_authenticated:
            challenge = {'WWW-Authenticate': 'Basic realm="Login Required"'}
            return unauthorized('Invalid credentials'), 401, challenge
        return func(*args, **kwargs)
    return decorated_view
| mapan1984/Hidden-Island | app/api/decorators.py | Python | mit | 973 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import HTMLParser
import urllib
from frappe import msgprint, throw, _
from frappe.email.smtp import SMTPServer, get_outgoing_email_account
from frappe.email.email_body import get_email, get_formatted_html
from html2text import html2text
from frappe.utils import cint, get_url, nowdate
class BulkLimitCrossedError(frappe.ValidationError): pass
def send(recipients=None, sender=None, doctype='User', email_field='email',
        subject='[No Subject]', message='[No Content]', ref_doctype=None,
        ref_docname=None, add_unsubscribe_link=True, attachments=None, reply_to=None):
    """Queue an email for each recipient in the Bulk Email outbox.

    Recipients are de-duplicated; a recipient who has unsubscribed (their
    row in `doctype` has ``unsubscribed`` set) is skipped when
    ``add_unsubscribe_link`` is on. Raises BulkLimitCrossedError when the
    monthly limit would be exceeded on a non-site-config SMTP account.
    """
    def is_unsubscribed(rdata):
        # A missing recipient record counts as unsubscribed (returns 1).
        if not rdata:
            return 1
        return cint(rdata.unsubscribed)
    def check_bulk_limit(new_mails):
        # NOTE(review): ``new_mails`` is unused -- the check below reads
        # len(recipients) from the enclosing scope instead.
        this_month = frappe.db.sql("""select count(*) from `tabBulk Email` where
            month(creation)=month(%s)""" % nowdate())[0][0]
        # No limit for own email settings
        smtp_server = SMTPServer()
        if smtp_server.email_account and not getattr(smtp_server.email_account,
            "from_site_config", False):
            monthly_bulk_mail_limit = frappe.conf.get('monthly_bulk_mail_limit') or 500
            if (this_month + len(recipients)) > monthly_bulk_mail_limit:
                throw(_("Bulk email limit {0} crossed").format(monthly_bulk_mail_limit),
                    BulkLimitCrossedError)
    def update_message(formatted, doc, add_unsubscribe_link):
        # Inject a per-recipient unsubscribe link at the template marker.
        updated = formatted
        if add_unsubscribe_link:
            unsubscribe_link = """<div style="padding: 7px; border-top: 1px solid #aaa;
                margin-top: 17px;">
                <small><a href="%s/?%s">
                Unsubscribe</a> from this list.</small></div>""" % (get_url(),
                urllib.urlencode({
                    "cmd": "frappe.email.bulk.unsubscribe",
                    "email": doc.get(email_field),
                    "type": doctype,
                    "email_field": email_field
                }))
            updated = updated.replace("<!--unsubscribe link here-->", unsubscribe_link)
        return updated
    if not recipients:
        recipients = []
    if not sender or sender == "Administrator":
        # Fall back to the configured outgoing account's address.
        email_account = get_outgoing_email_account()
        sender = email_account.get("sender") or email_account.email_id
    check_bulk_limit(len(recipients))
    formatted = get_formatted_html(subject, message)
    # De-duplicate and drop empty entries before queueing.
    for r in filter(None, list(set(recipients))):
        # NOTE(review): ``doctype``/``email_field`` are interpolated into the
        # SQL as identifiers -- they come from callers, not end users, but
        # should never be passed through from request data unvalidated.
        rdata = frappe.db.sql("""select * from `tab%s` where %s=%s""" % (doctype,
            email_field, '%s'), (r,), as_dict=1)
        doc = rdata and rdata[0] or {}
        if (not add_unsubscribe_link) or (not is_unsubscribed(doc)):
            # add to queue
            updated = update_message(formatted, doc, add_unsubscribe_link)
            try:
                text_content = html2text(updated)
            except HTMLParser.HTMLParseError:
                # Fall back to a stub plain-text part if the HTML is broken.
                text_content = "[See html attachment]"
            add(r, sender, subject, updated, text_content, ref_doctype, ref_docname, attachments, reply_to)
def add(email, sender, subject, formatted, text_content=None,
        ref_doctype=None, ref_docname=None, attachments=None, reply_to=None):
    """add to bulk mail queue

    Builds the full MIME message up front and stores it on a new
    Bulk Email document; recipients with invalid addresses are silently
    skipped (nothing is queued).
    """
    e = frappe.new_doc('Bulk Email')
    e.sender = sender
    e.recipient = email
    try:
        e.message = get_email(email, sender=e.sender, formatted=formatted, subject=subject,
            text_content=text_content, attachments=attachments, reply_to=reply_to).as_string()
    except frappe.InvalidEmailAddressError:
        # bad email id - don't add to queue
        return
    e.ref_doctype = ref_doctype
    e.ref_docname = ref_docname
    # ignore_permissions: queueing happens on behalf of system actions,
    # not the current user.
    e.insert(ignore_permissions=True)
@frappe.whitelist(allow_guest=True)
def unsubscribe():
    """Guest-accessible endpoint that marks an email address as unsubscribed.

    Expects ``type`` (DocType name), ``email_field`` (column holding the
    address) and ``email`` in the request; sets ``unsubscribed=1`` on all
    matching rows and renders a confirmation page.
    """
    import re
    from xml.sax.saxutils import escape
    doctype = frappe.form_dict.get('type')
    field = frappe.form_dict.get('email_field')
    email = frappe.form_dict.get('email')
    # SECURITY: ``doctype`` and ``field`` arrive from an unauthenticated
    # request and are interpolated into the SQL as *identifiers* (bind
    # parameters cannot be used for table/column names), so restrict them
    # to safe identifier characters to prevent SQL injection.
    if not (doctype and field and email
            and re.match(r'^[\w][\w -]*$', doctype)
            and re.match(r'^\w+$', field)):
        throw(_("Invalid unsubscribe request"))
    frappe.db.sql("""update `tab%s` set unsubscribed=1
        where `%s`=%s""" % (doctype, field, '%s'), (email,))
    if not frappe.form_dict.get("from_test"):
        frappe.db.commit()
    frappe.local.message_title = "Unsubscribe"
    # SECURITY: escape the user-supplied address before echoing it into HTML.
    frappe.local.message = "<h3>Unsubscribed</h3><p>%s has been successfully unsubscribed.</p>" % escape(email)
    frappe.response['type'] = 'page'
    frappe.response['page_name'] = 'message.html'
def flush(from_test=False):
    """flush email queue, every time: called from scheduler

    Sends at most 500 queued mails per run. Each row is claimed with a
    ``select ... for update`` row lock and moved through the states
    Not Sent -> Sending -> Sent (or Error, with the failure stored).
    When ``from_test`` is true (or emails are muted) nothing is actually
    sent and DB writes are not auto-committed.
    """
    smtpserver = SMTPServer()
    auto_commit = not from_test
    if frappe.flags.mute_emails or frappe.conf.get("mute_emails") or False:
        msgprint(_("Emails are muted"))
        from_test = True
    # Hard cap of 500 deliveries per scheduler tick.
    for i in xrange(500):
        # Claim one pending row; ``for update`` locks it against
        # concurrent flush runs.
        email = frappe.db.sql("""select * from `tabBulk Email` where
            status='Not Sent' limit 1 for update""", as_dict=1)
        if email:
            email = email[0]
        else:
            break
        frappe.db.sql("""update `tabBulk Email` set status='Sending' where name=%s""",
            (email["name"],), auto_commit=auto_commit)
        try:
            if not from_test:
                smtpserver.sess.sendmail(email["sender"], email["recipient"], email["message"])
            frappe.db.sql("""update `tabBulk Email` set status='Sent' where name=%s""",
                (email["name"],), auto_commit=auto_commit)
        except Exception, e:
            # Record the failure on the row; the mail stays in 'Error'
            # state and is not retried automatically.
            frappe.db.sql("""update `tabBulk Email` set status='Error', error=%s
                where name=%s""", (unicode(e), email["name"]), auto_commit=auto_commit)
def clear_outbox():
    """remove mails older than 30 days in Outbox"""
    # Permanently deletes Bulk Email rows (any status) created more than
    # 30 days ago; intended to be run periodically by the scheduler.
    frappe.db.sql("""delete from `tabBulk Email` where
        datediff(now(), creation) > 30""")
| gangadharkadam/letzfrappe | frappe/email/bulk.py | Python | mit | 5,170 |
"""Determine the layer plane angle of all the elements in a grid.
Author: Perry Roth-Johnson
Last modified: April 30, 2014
Usage:
1. Look through the mesh_stnXX.abq file and find all the element set names.
(Find all the lines that start with "*ELSET".)
2. Enter each of the element set names in one of the four lists below:
(1) list_of_LE_elementsets
(2) list_of_TE_elementsets
(3) list_of_lower_elementsets
(4) list_of_upper_elementsets
3. Run this script. Visually inspect the plot to make sure each of the element
sets are in the correct list. (The blue edge should be facing outward,
relative to the airfoil centerpoint. The magenta edge should be facing
inward.) If you find an element that is oriented incorrectly, note the
element number, and look up it's element set name from the printout in the
IPython terminal. Then, move that element set name to a different list in
this script.
4. Repeat step 3 until your visual inspection suggests that all the edges (and
layer plane angles) are being assigned correctly.
References:
http://stackoverflow.com/questions/3365171/calculating-the-angle-between-two-lines-without-having-to-calculate-the-slope/3366569#3366569
http://stackoverflow.com/questions/19295725/angle-less-than-180-between-two-segments-lines
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import lib.grid as gr
reload(gr)
import lib.abaqus_utils2 as au
reload(au)
import lib.vabs_utils as vu
reload(vu)
import lib.blade as bl
from shapely.geometry import Polygon, LineString
from descartes import PolygonPatch
# -----------------------------------------------
# update these parameters!
# Station number and the element numbers of the 4 manually-handled triangular
# elements change per station; verify them against mesh_stnXX.abq.
station_num = 22
skip_num = 25   # plot every 'skip_num' elements (larger values plot faster)
IS4_resin_u2_tri_elem_num = 4796   # num of tri elem in int surf 3 resin upper 2
IS4_resin_l2_tri_elem_num = 4764   # num of tri elem in int surf 3 resin lower 2
TE_reinf_foam_u3_tri_elem_num = 1121   # num of tri elem in TE reinf foam upper 3
TE_reinf_foam_l3_tri_elem_num = 1082   # num of tri elem in TE reinf foam lower 3
# -----------------------------------------------
stn_str = 'stn{0:02d}'.format(station_num)
plt.close('all')
# load the biplane blade
b1 = bl.BiplaneBlade(
    'biplane blade, flapwise symmetric, no stagger, rj/R=0.452, g/c=1.25',
    'biplane_blade')
# pre-process the station dimensions
station = b1.list_of_stations[station_num-1]
st = station.structure
af = station.airfoil
af.create_polygon()
st.create_all_layers()
st.save_all_layer_edges()
st.write_all_part_polygons()
# vertical offset between the two airfoils of the biplane cross-section
x3_off = af.lower_chord * af.gap_to_chord_ratio * af.gap_fraction
# plot the parts
station.plot_parts_offset(airfoil_to_plot='lower', x3_offset=x3_off)
# grab the axes of the figure created by plot_parts_offset
ax = plt.gcf().gca()
# Partition of all element set names from mesh_stnXX.abq into groups that
# share the same (outer_edge, inner_edge) node-number convention.  If the
# visual inspection shows a wrongly oriented element, move its set name to
# a different list (see module docstring).
# element sets on the leading edge
# outer_edge_node_nums=[1,4], inner_edge_node_nums=[2,3]
list_of_LE_elementsets = [
    'lepanel',
    'rbtrile',
    'esgelle',
    'estrile',
    'is1rsle',
    'is1trile',
    'sw1biaxl',
    'sw1foam',
    'sw1biaxr',
    'is1rlsw1',
    'is1tlsw1',
    'is2rrsw1',
    'is2trsw1'
    ]
# element sets on the trailing edge
# outer_edge_node_nums=[3,2], inner_edge_node_nums=[4,1]
list_of_TE_elementsets = [
    'is3reste',
    'is3trite',
    'sw2biaxl',
    'sw2foam',
    'sw2biaxr',
    'sw3biaxl',
    'sw3foam',
    'sw3biaxr',
    'is2rlsw2',
    'is2tlsw2',
    'is3rrsw2',
    'is3trsw2',
    'is3rlsw3',
    'is3tlsw3',
    'is4rrsw3',
    'is4trsw3'
    ]
# element sets on the lower surface
# outer_edge_node_nums=[2,1], inner_edge_node_nums=[3,4]
list_of_lower_elementsets = [
    'tefoaml1',
    'tefoaml2',
    'tefoaml3',
    'teunil1',
    'teunil2',
    'teunil3',
    'teunil4',
    'ap1lower',
    'ap2lower',
    'esgllap1',
    'estrlap1',
    'rbtrlap1',
    'is3rlap1',
    'is3tlap1',
    'esgllap2',
    'estrlap2',
    'is4rlap2',
    'is4tlap2',
    'sclower',
    'rbtriscl',
    'rbtrtel1',
    'rbtrtel2',
    'rbtrtel3',
    'rbtrtel4',
    'estrtel1',
    'estrtel2',
    'estrtel3',
    'estrtel4',
    'esgltel1',
    'esgltel2',
    'esgltel3',
    'esgltel4',
    'esgelscl',
    'estriscl',
    'is2rsscl',
    'is2trscl',
    'is4rtel1',
    'is4rtel2',
    'is4ttel1',
    'is4ttel2',
    'rbtrbsw1',
    'esglbsw1',
    'estrbsw1',
    'esglbsw2',
    'estrbsw2',
    'rbtrbsw2',
    'estrbsw3',
    'esglbsw3'
    ]
# element sets on the upper surface
# outer_edge_node_nums=[4,3], inner_edge_node_nums=[1,2]
list_of_upper_elementsets = [
    'tefoamu1',
    'tefoamu2',
    'tefoamu3',
    'teuniu1',
    'teuniu2',
    'teuniu3',
    'teuniu4',
    'ap1upper',
    'ap2upper',
    'esgluap1',
    'estruap1',
    'rbtruap1',
    'is3ruap1',
    'is3tuap1',
    'esgluap2',
    'estruap2',
    'is4ruap2',
    'is4tuap2',
    'is4rteu1',
    'is4rteu2',
    'is4tteu1',
    'is4tteu2',
    'esglasw1',
    'estrasw1',
    'rbtrasw1',
    'rbtrteu1',
    'rbtrteu2',
    'rbtrteu3',
    'rbtrteu4',
    'estrteu1',
    'estrteu2',
    'estrteu3',
    'estrteu4',
    'esglteu1',
    'esglteu2',
    'esglteu3',
    'esglteu4',
    'esglasw2',
    'estrasw2',
    'rbtrasw2',
    'esglasw3',
    'estrasw3',
    'scupper',
    'rbtriscu',
    'estriscu',
    'is2rsscu',
    'esgelscu',
    'is2trscu'
    ]
# element sets of triangular elements on the lower surface
# outer_edge_node_nums=[2,1]
list_of_tri_lower_elementsets = [
    'is4rtel2_tri',
    'tefoaml3_tri'
    ]
# element sets of triangular elements on the upper surface
# outer_edge_node_nums=[3,2]
list_of_tri_upper_elementsets = [
    'is4rteu2_tri',
    'tefoamu3_tri'
    ]
# import the initial grid object
fmt_grid = 'biplane_blade/' + stn_str + '/mesh_' + stn_str + '.abq'
g = au.AbaqusGrid(fmt_grid, debug_flag=True)
# manually assign four triangular elements into their own new element sets
# (quad-oriented node numbering does not apply to them)
g.list_of_elements[IS4_resin_u2_tri_elem_num-1].element_set = 'is4rteu2_tri'
g.list_of_elements[IS4_resin_l2_tri_elem_num-1].element_set = 'is4rtel2_tri'
g.list_of_elements[TE_reinf_foam_u3_tri_elem_num-1].element_set = 'tefoamu3_tri'
g.list_of_elements[TE_reinf_foam_l3_tri_elem_num-1].element_set = 'tefoaml3_tri'
# update the grid object with all the layer plane angles
for elem in g.list_of_elements:
    if elem.element_set in list_of_LE_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[1,4],
            inner_edge_node_nums=[2,3])
    elif elem.element_set in list_of_TE_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[3,2],
            inner_edge_node_nums=[4,1])
    elif elem.element_set in list_of_lower_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[2,1],
            inner_edge_node_nums=[3,4])
    elif elem.element_set in list_of_upper_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[4,3],
            inner_edge_node_nums=[1,2])
    elif elem.element_set in list_of_tri_lower_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[2,1])
    elif elem.element_set in list_of_tri_upper_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[3,2])
    else:
        # NOTE(review): raising Warning aborts the script at the first
        # element lacking a set; warnings.warn(...) would report and
        # continue -- confirm which behavior is intended.
        raise Warning("Element #{0} has no element set!".format(elem.elem_num))
# plot a small selection of elements to check the results
for elem in g.list_of_elements[::skip_num]:
    elem.plot(label_nodes=False)
    print elem.elem_num, elem.element_set, elem.theta1
# also plot the manually reassigned triangles and their neighbors
g.list_of_elements[TE_reinf_foam_u3_tri_elem_num-1].plot()
g.list_of_elements[TE_reinf_foam_u3_tri_elem_num-2].plot()
g.list_of_elements[TE_reinf_foam_l3_tri_elem_num-1].plot()
g.list_of_elements[TE_reinf_foam_l3_tri_elem_num-2].plot()
g.list_of_elements[IS4_resin_u2_tri_elem_num-1].plot()
g.list_of_elements[IS4_resin_u2_tri_elem_num-2].plot()
g.list_of_elements[IS4_resin_l2_tri_elem_num-1].plot()
g.list_of_elements[IS4_resin_l2_tri_elem_num-2].plot()
# show the plot
plt.xlim([-3,5])
plt.ylim([-3,3])
ax.set_aspect('equal')
print ' ------------------------'
print ' LEGEND'
print ' magenta : inner edge'
print ' blue : outer edge'
print ' ------------------------'
plt.show()
# -----------------------------------------------------------------------------
# read layers.csv to determine the number of layers
layer_file = pd.read_csv('biplane_blade/layers.csv', index_col=0)
number_of_layers = len(layer_file)
# write the updated grid object to a VABS input file
fmt_vabs = 'biplane_blade/' + stn_str + '/mesh_' + stn_str + '.vabs'
f = vu.VabsInputFile(
    vabs_filename=fmt_vabs,
    grid=g,
    material_filename='biplane_blade/materials.csv',
    layer_filename='biplane_blade/layers.csv',
    debug_flag=True,
    flags={
        'format'    : 1,
        'Timoshenko': 1,
        'recover'   : 0,
        'thermal'   : 0,
        'curve'     : 0,
        'oblique'   : 0,
        'trapeze'   : 0,
        'Vlasov'    : 0
    })
| perryjohnson/biplaneblade | biplane_blade_lib/layer_plane_angles_stn22.py | Python | gpl-3.0 | 9,221 |
"""
raven.transport.threaded
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import atexit
import logging
import time
import threading
import os
from Queue import Queue
from raven.transport.base import HTTPTransport
# Default number of seconds to wait for queued messages to flush at shutdown.
DEFAULT_TIMEOUT = 10

logger = logging.getLogger('sentry.errors')
class AsyncWorker(object):
    """Single daemon thread that drains a queue of send callbacks."""

    # Sentinel; queueing it tells the worker thread to exit its loop.
    _terminator = object()

    def __init__(self, shutdown_timeout=DEFAULT_TIMEOUT):
        self._queue = Queue(-1)  # unbounded queue
        self._lock = threading.Lock()  # guards thread start/stop
        self._thread = None
        self.options = {
            'shutdown_timeout': shutdown_timeout,
        }
        self.start()

    def main_thread_terminated(self):
        # atexit hook: give pending messages a chance to flush before exit.
        size = self._queue.qsize()
        if size:
            timeout = self.options['shutdown_timeout']
            print "Sentry is attempting to send %s pending error messages" % size
            print "Waiting up to %s seconds" % timeout
            if os.name == 'nt':
                print "Press Ctrl-Break to quit"
            else:
                print "Press Ctrl-C to quit"
            self.stop(timeout=timeout)

    def start(self):
        """
        Starts the task thread.
        """
        self._lock.acquire()
        try:
            if not self._thread:
                self._thread = threading.Thread(target=self._target)
                self._thread.setDaemon(True)
                self._thread.start()
        finally:
            self._lock.release()
        # NOTE(review): registered on every start() call; a stop()/start()
        # cycle would stack duplicate atexit hooks -- confirm intended.
        atexit.register(self.main_thread_terminated)

    def stop(self, timeout=None):
        """
        Stops the task thread. Synchronous!
        """
        self._lock.acquire()
        try:
            if self._thread:
                # The terminator wakes the worker and makes it break out.
                self._queue.put_nowait(self._terminator)
                self._thread.join(timeout=timeout)
                self._thread = None
        finally:
            self._lock.release()

    def queue(self, callback, *args, **kwargs):
        # Hand (callback, args, kwargs) to the worker thread; never blocks.
        self._queue.put_nowait((callback, args, kwargs))

    def _target(self):
        # Worker loop: run queued callbacks until the terminator arrives.
        while 1:
            record = self._queue.get()
            if record is self._terminator:
                break
            callback, args, kwargs = record
            try:
                callback(*args, **kwargs)
            except Exception:
                # Never let one failed send kill the worker thread.
                logger.error('Failed processing job', exc_info=True)
            time.sleep(0)  # yield between jobs
class ThreadedHTTPTransport(HTTPTransport):
    """HTTP transport that hands messages off to a background AsyncWorker.

    The ``threaded+`` prefix on the scheme only selects this transport;
    it is stripped before the URL is used for actual delivery.
    """

    scheme = ['threaded+http', 'threaded+https']

    def __init__(self, parsed_url):
        super(ThreadedHTTPTransport, self).__init__(parsed_url)
        # remove the threaded+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]

    def get_worker(self):
        """Return the lazily created AsyncWorker owned by this transport."""
        try:
            return self._worker
        except AttributeError:
            self._worker = AsyncWorker()
            return self._worker

    def send_sync(self, data, headers):
        """Deliver one message immediately on the calling thread."""
        super(ThreadedHTTPTransport, self).send(data, headers)

    def send(self, data, headers):
        """Queue one message for asynchronous delivery by the worker."""
        self.get_worker().queue(self.send_sync, data, headers)
| mozilla/verbatim | vendor/lib/python/raven/transport/threaded.py | Python | gpl-2.0 | 3,097 |
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python-3000 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
# This module is in the public domain. No warranties.
__author__ = ('Ka-Ping Yee <[email protected]>',
'Yury Selivanov <[email protected]>')
import ast
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import token
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication, but fall
# back to hardcoding so the dependency is optional
try:
    from dis import COMPILER_FLAG_NAMES as _flag_names
except ImportError:
    CO_OPTIMIZED, CO_NEWLOCALS = 0x1, 0x2
    CO_VARARGS, CO_VARKEYWORDS = 0x4, 0x8
    CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
else:
    # Re-export each flag as a module-level CO_<NAME> constant.
    mod_dict = globals()
    for k, v in _flag_names.items():
        mod_dict["CO_" + v] = k

# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
    """Return true if the object is a module.

    A module exposes, among others, ``__doc__`` (its documentation
    string), ``__file__`` (the file it was loaded from, missing for
    built-ins) and ``__cached__`` (path of the byte-compiled file)."""
    return isinstance(object, types.ModuleType)
def isclass(object):
    """Return true if the object is a class.

    Class objects provide ``__doc__`` (documentation string) and
    ``__module__`` (name of the module the class was defined in)."""
    return isinstance(object, type)
def ismethod(object):
    """Return true if the object is a bound instance method.

    Method objects carry ``__func__`` (the underlying function) and
    ``__self__`` (the instance the method is bound to), in addition to
    the usual ``__doc__`` and ``__name__`` attributes."""
    return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod() or isclass() or isfunction() are true.

    This is new in Python 2.2, and, for example, is true of int.__add__.
    An object passing this test has a __get__ attribute but not a __set__
    attribute, but beyond that the set of attributes varies.  __name__ is
    usually sensible, and __doc__ often is.

    Methods implemented via descriptors that also pass one of the other
    tests return false from the ismethoddescriptor() test, simply because
    the other tests promise more -- you can, e.g., count on having the
    __func__ attribute (etc) when an object passes ismethod()."""
    # The more specific predicates are mutually exclusive with this one.
    if any(test(object) for test in (isclass, ismethod, isfunction)):
        return False
    descriptor_type = type(object)
    return (hasattr(descriptor_type, "__get__")
            and not hasattr(descriptor_type, "__set__"))
def isdatadescriptor(object):
    """Return true if the object is a data descriptor.

    Data descriptors have both a __get__ and a __set__ attribute.  Examples
    are properties (defined in Python) and getsets and members (defined in
    C).  Typically, data descriptors will also have __name__ and __doc__
    attributes (properties, getsets, and members have both of these
    attributes), but this is not guaranteed."""
    # The more specific predicates are mutually exclusive with this one.
    if any(test(object) for test in (isclass, ismethod, isfunction)):
        return False
    descriptor_type = type(object)
    return (hasattr(descriptor_type, "__set__")
            and hasattr(descriptor_type, "__get__"))
if hasattr(types, 'MemberDescriptorType'):
    # CPython and equivalent
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Other implementations
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        # The descriptor type is not exposed here, so nothing can match.
        return False
if hasattr(types, 'GetSetDescriptorType'):
    # CPython and equivalent
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Other implementations
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        # The descriptor type is not exposed here, so nothing can match.
        return False
def isfunction(object):
    """Return true if the object is a user-defined function.

    Such objects carry ``__code__`` (the compiled bytecode),
    ``__defaults__`` and ``__kwdefaults__`` (default argument values),
    ``__globals__`` (the namespace the function was defined in) and
    ``__annotations__`` (parameter annotations)."""
    return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
    """Return true if the object is a user-defined generator function.

    Generator function objects provide the same attributes as plain
    functions; see help(isfunction) for the listing."""
    if not (isfunction(object) or ismethod(object)):
        return False
    # A generator function is flagged in its code object.
    return bool(object.__code__.co_flags & CO_GENERATOR)
def isgenerator(object):
    """Return true if the object is a generator.

    Generators support iteration (``__iter__``/``send``), can be shut down
    with ``close`` or ``throw``, and expose ``gi_code``, ``gi_frame`` and
    ``gi_running``."""
    return isinstance(object, types.GeneratorType)
def istraceback(object):
    """Return true if the object is a traceback.

    Tracebacks expose ``tb_frame`` (the frame at this level), ``tb_lasti``
    (index of the last attempted bytecode instruction), ``tb_lineno``
    (current source line) and ``tb_next`` (the next inner traceback)."""
    return isinstance(object, types.TracebackType)
def isframe(object):
    """Return true if the object is a frame object.

    Frames expose, among others, ``f_back`` (the caller's frame),
    ``f_code`` (the code object being executed), ``f_globals`` and
    ``f_locals`` (the namespaces seen by the frame), ``f_lineno``
    (current source line) and ``f_trace`` (per-frame trace function)."""
    return isinstance(object, types.FrameType)
def iscode(object):
    """Return true if the object is a code object.

    Code objects expose the ``co_*`` family of attributes: ``co_code``
    (raw bytecode), ``co_filename`` and ``co_firstlineno`` (origin),
    ``co_argcount``, ``co_varnames``, ``co_consts``, ``co_flags``,
    ``co_name``, ``co_nlocals``, ``co_stacksize`` and so on."""
    return isinstance(object, types.CodeType)
def isbuiltin(object):
    """Return true if the object is a built-in function or method.

    Built-ins expose ``__doc__``, ``__name__`` and ``__self__`` (the
    instance a method is bound to, or None)."""
    return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
    """Return true if the object is any kind of function or method."""
    # Accept built-ins, plain functions, bound methods and method
    # descriptors alike.
    return any(check(object) for check in
               (isbuiltin, isfunction, ismethod, ismethoddescriptor))
def isabstract(object):
    """Return true if the object is an abstract base class (ABC)."""
    if not isinstance(object, type):
        return False
    # The abstract-ness of a class lives in its type flags.
    return bool(object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.
    Optionally, only return members that satisfy a given predicate."""
    if isclass(object):
        mro = (object,) + getmro(object)
    else:
        mro = ()
    results = []
    processed = set()
    names = dir(object)
    # Add any DynamicClassAttributes to the list of names if object is a class;
    # this may result in duplicate entries if, for example, a virtual
    # attribute with the same name as a DynamicClassAttribute exists
    try:
        for base in object.__bases__:
            for k, v in base.__dict__.items():
                if isinstance(v, types.DynamicClassAttribute):
                    names.append(k)
    except AttributeError:
        # Not a class (no __bases__): nothing to add.
        pass
    for key in names:
        # First try to get the value via getattr.  Some descriptors don't
        # like calling their __get__ (see bug #1785), so fall back to
        # looking in the __dict__.
        try:
            value = getattr(object, key)
            # handle the duplicate key
            if key in processed:
                raise AttributeError
        except AttributeError:
            for base in mro:
                if key in base.__dict__:
                    value = base.__dict__[key]
                    break
            else:
                # could be a (currently) missing slot member, or a buggy
                # __dir__; discard and move on
                continue
        if not predicate or predicate(value):
            results.append((key, value))
        processed.add(key)
    results.sort(key=lambda pair: pair[0])
    return results
# Result record for classify_class_attrs().
Attribute = namedtuple('Attribute', 'name kind defining_class object')

def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:

        0. The name (a string).
        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method or descriptor
               'data'            not a method
        2. The class which defined this attribute (a class).
        3. The object as obtained by calling getattr; if this fails, or if the
           resulting object does not live anywhere in the class' mro (including
           metaclasses) then the object is looked up in the defining class's
           dict (found by walking the mro).

    If one of the items in dir(cls) is stored in the metaclass it will now
    be discovered and not have None be listed as the class in which it was
    defined.  Any items whose home class cannot be discovered are skipped.
    """

    mro = getmro(cls)
    metamro = getmro(type(cls)) # for attributes stored in the metaclass
    metamro = tuple([cls for cls in metamro if cls not in (type, object)])
    class_bases = (cls,) + mro
    all_bases = class_bases + metamro
    names = dir(cls)
    # Add any DynamicClassAttributes to the list of names;
    # this may result in duplicate entries if, for example, a virtual
    # attribute with the same name as a DynamicClassAttribute exists.
    for base in mro:
        for k, v in base.__dict__.items():
            if isinstance(v, types.DynamicClassAttribute):
                names.append(k)
    result = []
    processed = set()

    for name in names:
        # Get the object associated with the name, and where it was defined.
        # Normal objects will be looked up with both getattr and directly in
        # its class' dict (in case getattr fails [bug #1785], and also to look
        # for a docstring).
        # For DynamicClassAttributes on the second pass we only look in the
        # class's dict.
        #
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        homecls = None
        get_obj = None
        dict_obj = None
        if name not in processed:
            try:
                if name == '__dict__':
                    raise Exception("__dict__ is special, don't want the proxy")
                get_obj = getattr(cls, name)
            except Exception as exc:
                pass
            else:
                homecls = getattr(get_obj, "__objclass__", homecls)
                if homecls not in class_bases:
                    # if the resulting object does not live somewhere in the
                    # mro, drop it and search the mro manually
                    homecls = None
                    last_cls = None
                    # first look in the classes
                    for srch_cls in class_bases:
                        srch_obj = getattr(srch_cls, name, None)
                        if srch_obj == get_obj:
                            last_cls = srch_cls
                    # then check the metaclasses
                    for srch_cls in metamro:
                        try:
                            srch_obj = srch_cls.__getattr__(cls, name)
                        except AttributeError:
                            continue
                        if srch_obj == get_obj:
                            last_cls = srch_cls
                    if last_cls is not None:
                        homecls = last_cls
        for base in all_bases:
            if name in base.__dict__:
                dict_obj = base.__dict__[name]
                if homecls not in metamro:
                    homecls = base
                break
        if homecls is None:
            # unable to locate the attribute anywhere, most likely due to
            # buggy custom __dir__; discard and move on
            continue
        obj = get_obj or dict_obj
        # Classify the object or its descriptor.
        if isinstance(dict_obj, staticmethod):
            kind = "static method"
            obj = dict_obj
        elif isinstance(dict_obj, classmethod):
            kind = "class method"
            obj = dict_obj
        elif isinstance(dict_obj, property):
            kind = "property"
            obj = dict_obj
        elif isroutine(obj):
            kind = "method"
        else:
            kind = "data"
        result.append(Attribute(name, kind, homecls, obj))
        processed.add(name)
    return result
# ----------------------------------------------------------- class helpers
def getmro(cls):
    """Return the tuple of base classes (including cls) in method
    resolution order."""
    return cls.__mro__
# -------------------------------------------------------- function helpers
def unwrap(func, *, stop=None):
    """Get the object wrapped by *func*.

   Follows the chain of :attr:`__wrapped__` attributes returning the last
   object in the chain.

   *stop* is an optional callback accepting an object in the wrapper chain
   as its sole argument that allows the unwrapping to be terminated early if
   the callback returns a true value.  If the callback never returns a true
   value, the last object in the chain is returned as usual.  For example,
   :func:`signature` uses this to stop unwrapping if any object in the
   chain has a ``__signature__`` attribute defined.

   :exc:`ValueError` is raised if a cycle is encountered.

    """
    if stop is None:
        def _keep_unwrapping(candidate):
            return hasattr(candidate, '__wrapped__')
    else:
        def _keep_unwrapping(candidate):
            return hasattr(candidate, '__wrapped__') and not stop(candidate)
    original = func         # kept only for the cycle error message
    seen = {id(func)}       # memoise by id to tolerate non-hashable objects
    while _keep_unwrapping(func):
        func = func.__wrapped__
        if id(func) in seen:
            raise ValueError('wrapper loop when unwrapping {!r}'.format(original))
        seen.add(id(func))
    return func
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    expanded = line.expandtabs()
    return len(expanded) - len(expanded.lstrip())
def getdoc(object):
    """Get the documentation string for an object.

    All tabs are expanded to spaces.  To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
    uniformly removed from the second line onwards is removed."""
    # Missing or non-string __doc__ both mean "no documentation".
    doc = getattr(object, '__doc__', None)
    if not isinstance(doc, str):
        return None
    return cleandoc(doc)
def cleandoc(doc):
    """Clean up indentation from docstrings.

    Any whitespace that can be uniformly removed from the second line
    onwards is removed."""
    try:
        lines = doc.expandtabs().split('\n')
    except UnicodeError:
        return None
    # Smallest indentation over all non-blank lines after the first.
    indents = [len(line) - len(line.lstrip())
               for line in lines[1:] if line.lstrip()]
    margin = min(indents) if indents else sys.maxsize
    # The first line keeps no leading whitespace at all.
    if lines:
        lines[0] = lines[0].lstrip()
    if margin < sys.maxsize:
        lines[1:] = [line[margin:] for line in lines[1:]]
    # Drop trailing then leading blank lines.
    while lines and not lines[-1]:
        lines.pop()
    while lines and not lines[0]:
        lines.pop(0)
    return '\n'.join(lines)
def getfile(object):
    """Work out which source or compiled file an object was defined in."""
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in module'.format(object))
    if isclass(object):
        if hasattr(object, '__module__'):
            # A class has no file of its own; use its defining module's.
            object = sys.modules.get(object.__module__)
            if hasattr(object, '__file__'):
                return object.__file__
        raise TypeError('{!r} is a built-in class'.format(object))
    # Unwrap step by step: method -> function -> code; traceback -> frame
    # -> code.  Order matters, each step feeds the next.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError('{!r} is not a module, class, method, '
                    'function, traceback, frame, or code object'.format(object))
# Result record for the (deprecated) getmoduleinfo().
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')

def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file."""
    warnings.warn('inspect.getmoduleinfo() is deprecated', DeprecationWarning,
                  2)
    with warnings.catch_warnings():
        # imp itself is deprecated; silence its import-time warning.
        warnings.simplefilter('ignore', PendingDeprecationWarning)
        import imp
    filename = os.path.basename(path)
    suffixes = [(-len(suffix), suffix, mode, mtype)
                    for suffix, mode, mtype in imp.get_suffixes()]
    suffixes.sort() # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return ModuleInfo(filename[:neglen], suffix, mode, mtype)
    # Implicitly returns None when no suffix matches.
def getmodulename(path):
    """Return the module name for a given file, or None."""
    fname = os.path.basename(path)
    # Check for paths that look like an actual module file; try the longest
    # suffixes first, in case they overlap.
    for suffix in sorted(importlib.machinery.all_suffixes(),
                         key=len, reverse=True):
        if fname.endswith(suffix):
            return fname[:-len(suffix)]
    return None
def getsourcefile(object):
    """Return the filename that can be used to locate an object's source.
    Return None if no way can be identified to get the source.
    """
    filename = getfile(object)
    all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]
    all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]
    if any(filename.endswith(s) for s in all_bytecode_suffixes):
        # Map a byte-compiled file back to its corresponding source path.
        filename = (os.path.splitext(filename)[0] +
                    importlib.machinery.SOURCE_SUFFIXES[0])
    elif any(filename.endswith(s) for s in
                 importlib.machinery.EXTENSION_SUFFIXES):
        # Extension modules have no Python source at all.
        return None
    if os.path.exists(filename):
        return filename
    # only return a non-existent filename if the module has a PEP 302 loader
    if getattr(getmodule(object, filename), '__loader__', None) is not None:
        return filename
    # or it is in the linecache
    if filename in linecache.cache:
        return filename
    # Implicitly returns None when the source cannot be located.
def getabsfile(object, _filename=None):
    """Return an absolute path to the source or compiled file for an object.

    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    filename = _filename
    if filename is None:
        filename = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(filename))
# Cache: normalized file name -> module __name__ (filled by getmodule()).
modulesbyfile = {}
# Cache: module name -> the raw __file__ last seen for it, used to skip
# re-mapping modules that were already processed.
_filesbymodname = {}

def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in list(sys.modules.items()):
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['builtins']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
    # Implicitly returns None when no owning module can be determined.
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An OSError
    is raised if the source code cannot be retrieved."""

    file = getfile(object)
    sourcefile = getsourcefile(object)
    # '<...>' pseudo-filenames (e.g. '<stdin>') may still be in linecache.
    if not sourcefile and file[:1] + file[-1:] != '<>':
        raise OSError('source code not available')
    file = sourcefile if sourcefile else file

    module = getmodule(object, file)
    if module:
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise OSError('could not get source code')

    if ismodule(object):
        # A module's source starts at the top of the file.
        return lines, 0

    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise OSError('could not find class definition')

    # Reduce everything else to a code object first.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise OSError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Walk upwards to include any decorators above the def/lambda line.
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise OSError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.
    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (OSError, TypeError):
        # No retrievable source for this object.
        return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        # Skip over a shebang line, if present.
        if lines and lines[0][:2] == '#!': start = 1
        # Skip blank lines and bare '#' lines before the first real comment.
        while start < len(lines) and lines[start].strip() in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(lines[end].expandtabs())
                end = end + 1
            return ''.join(comments)
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and lines[end].lstrip()[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [lines[end].expandtabs().lstrip()]
            if end > 0:
                end = end - 1
                comment = lines[end].expandtabs().lstrip()
                # Walk upward while lines are comments at the same indent.
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = lines[end].expandtabs().lstrip()
            # Strip leading and trailing bare '#' lines from the block.
            while comments and comments[0].strip() == '#':
                comments[:1] = []
            while comments and comments[-1].strip() == '#':
                comments[-1:] = []
            return ''.join(comments)
class EndOfBlock(Exception):
    """Signal raised by BlockFinder when the end of a code block is reached."""
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0        # current INDENT depth relative to block start
        self.islambda = False  # block is a lambda (ends at the first NEWLINE)
        self.started = False   # seen the introducing "def"/"class"/"lambda"
        self.passline = False  # currently skipping to the end of a line
        self.last = 1          # last line number known to be inside the block
    def tokeneater(self, type, token, srowcol, erowcol, line):
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            # not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    finder = BlockFinder()
    try:
        # Feed every token to the finder; it raises EndOfBlock when the
        # block's extent is known.  An IndentationError simply truncates.
        for tok in tokenize.generate_tokens(iter(lines).__next__):
            finder.tokeneater(*tok)
    except (EndOfBlock, IndentationError):
        pass
    return lines[:finder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The lines returned correspond to the object only, and
    the line number gives the position of the first line in the original
    file.  Raises OSError if the source cannot be retrieved."""
    lines, lnum = findsource(object)
    if ismodule(object):
        # A module's source is the whole file, conventionally at line 0.
        return lines, 0
    return getblock(lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a single string.  An
    OSError is raised if the source code cannot be retrieved."""
    return ''.join(getsourcelines(object)[0])
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper function for getclasstree()."""
    # Sort in place for a deterministic, module-then-name ordering.
    classes.sort(key=attrgetter('__module__', '__name__'))
    results = []
    for cls in classes:
        results.append((cls, cls.__bases__))
        if cls in children:
            # Subclasses appear as a nested list right after their base.
            results.append(walktree(children[cls], children, cls))
    return results
def getclasstree(classes, unique=False):
    """Arrange the given list of classes into a hierarchy of nested lists.
    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list.  Each entry is a 2-tuple
    containing a class and a tuple of its base classes.  If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list.  Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}
    roots = []
    for c in classes:
        if not c.__bases__:
            if c not in roots:
                roots.append(c)
            continue
        for parent in c.__bases__:
            children.setdefault(parent, [])
            if c not in children[parent]:
                children[parent].append(c)
            # With unique=True, record the class under one base only.
            if unique and parent in classes:
                break
    # Bases that are not in the input list become additional roots.
    roots.extend(parent for parent in children if parent not in classes)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
# Result type of getargs(): positional (plus appended keyword-only) argument
# names, and the names of the * and ** parameters (each None when absent).
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
def getargs(co):
    """Get information about the arguments accepted by a code object.
    Three things are returned: (args, varargs, varkw), where
    'args' is the list of argument names (keyword-only names are appended),
    and 'varargs' and 'varkw' are the names of the * and ** arguments or
    None."""
    positional, star, kwonly, dstar = _getfullargs(co)
    return Arguments(positional + kwonly, star, dstar)
def _getfullargs(co):
    """Get information about the arguments accepted by a code object.
    Four things are returned: (args, varargs, kwonlyargs, varkw), where
    'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
    and 'varkw' are the names of the * and ** arguments or None.

    Raises TypeError if 'co' is not a code object."""
    if not iscode(co):
        raise TypeError('{!r} is not a code object'.format(co))
    nargs = co.co_argcount
    names = co.co_varnames
    nkwargs = co.co_kwonlyargcount
    # co_varnames lists positional parameters first, then keyword-only
    # parameters, then the */** parameters, then other locals.
    args = list(names[:nargs])
    kwonlyargs = list(names[nargs:nargs+nkwargs])
    nargs += nkwargs
    varargs = None
    if co.co_flags & CO_VARARGS:
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[nargs]
    return args, varargs, kwonlyargs, varkw
# Result type of the legacy getargspec(); 'keywords' is the ** parameter name.
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
    """Get the names and default values of a function's arguments.
    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names.
    'args' will include keyword-only argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.
    Use the getfullargspec() API for Python-3000 code, as annotations
    and keyword arguments are supported. getargspec() will raise ValueError
    if the func has either annotations or keyword arguments.
    """
    spec = getfullargspec(func)
    # The legacy 4-tuple cannot represent keyword-only args or annotations.
    if spec.kwonlyargs or spec.annotations:
        raise ValueError("Function has keyword-only arguments or annotations"
                         ", use getfullargspec() API which can support them")
    return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
# Result type of getfullargspec(); extends the legacy ArgSpec with
# keyword-only argument names/defaults and the annotations dict.
FullArgSpec = namedtuple('FullArgSpec',
    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(func):
    """Get the names and default values of a callable object's arguments.
    A tuple of seven things is returned:
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.
    'kwonlyargs' is a list of keyword-only argument names.
    'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
    'annotations' is a dictionary mapping argument names to annotations.
    The first four items in the tuple correspond to getargspec().
    """
    try:
        # Re: `skip_bound_arg=False`
        #
        # There is a notable difference in behaviour between getfullargspec
        # and Signature: the former always returns 'self' parameter for bound
        # methods, whereas the Signature always shows the actual calling
        # signature of the passed object.
        #
        # To simulate this behaviour, we "unbind" bound methods, to trick
        # inspect.signature to always return their first parameter ("self",
        # usually)
        # Re: `follow_wrapper_chains=False`
        #
        # getfullargspec() historically ignored __wrapped__ attributes,
        # so we ensure that remains the case in 3.3+
        sig = _signature_internal(func,
                                  follow_wrapper_chains=False,
                                  skip_bound_arg=False)
    except Exception as ex:
        # Most of the times 'signature' will raise ValueError.
        # But, it can also raise AttributeError, and, maybe something
        # else. So to be fully backwards compatible, we catch all
        # possible exceptions here, and reraise a TypeError.
        raise TypeError('unsupported callable') from ex
    args = []
    varargs = None
    varkw = None
    kwonlyargs = []
    annotations = {}
    defaults = ()           # was assigned twice; one assignment suffices
    kwdefaults = {}
    if sig.return_annotation is not sig.empty:
        annotations['return'] = sig.return_annotation
    for param in sig.parameters.values():
        kind = param.kind
        name = param.name
        if kind is _POSITIONAL_ONLY:
            args.append(name)
        elif kind is _POSITIONAL_OR_KEYWORD:
            args.append(name)
            if param.default is not param.empty:
                defaults += (param.default,)
        elif kind is _VAR_POSITIONAL:
            varargs = name
        elif kind is _KEYWORD_ONLY:
            kwonlyargs.append(name)
            if param.default is not param.empty:
                kwdefaults[name] = param.default
        elif kind is _VAR_KEYWORD:
            varkw = name
        if param.annotation is not param.empty:
            annotations[name] = param.annotation
    if not kwdefaults:
        # compatibility with 'func.__kwdefaults__'
        kwdefaults = None
    if not defaults:
        # compatibility with 'func.__defaults__'
        defaults = None
    return FullArgSpec(args, varargs, varkw, defaults,
                       kwonlyargs, kwdefaults, annotations)
# Result type of getargvalues(): argument names plus the frame's locals dict.
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.
    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame."""
    names, star, dstar = getargs(frame.f_code)
    return ArgInfo(names, star, dstar, frame.f_locals)
def formatannotation(annotation, base_module=None):
    """Render an annotation for display.
    Types are shown as dotted names; the module prefix is dropped for
    builtins and for types belonging to *base_module*.  Anything that is
    not a type falls back to repr()."""
    if not isinstance(annotation, type):
        return repr(annotation)
    module = annotation.__module__
    if module in ('builtins', base_module):
        return annotation.__name__
    return '{}.{}'.format(module, annotation.__name__)
def formatannotationrelativeto(object):
    """Return a formatannotation() variant bound to *object*'s module.
    Annotations referring to types from that module are then rendered
    without the module prefix."""
    module = getattr(object, '__module__', None)
    def _formatannotation(annotation):
        return formatannotation(annotation, module)
    return _formatannotation
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  kwonlyargs=(), kwonlydefaults={}, annotations={},
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  formatreturns=lambda text: ' -> ' + text,
                  formatannotation=formatannotation):
    """Format an argument spec from the values returned by getargspec
    or getfullargspec.
    The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations).  The other five arguments
    are the corresponding optional formatting functions that are called to
    turn names and values into strings.  The last argument is an optional
    function to format the sequence of arguments."""
    def annotated(arg):
        # Render one argument name, with its annotation when present.
        text = formatarg(arg)
        if arg in annotations:
            text += ': ' + formatannotation(annotations[arg])
        return text
    specs = []
    # Defaults align with the tail of 'args'.
    firstdefault = len(args) - len(defaults) if defaults else None
    for i, arg in enumerate(args):
        spec = annotated(arg)
        if firstdefault is not None and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(annotated(varargs)))
    elif kwonlyargs:
        # A bare '*' separates keyword-only args when there is no *args.
        specs.append('*')
    for kwonlyarg in kwonlyargs:
        spec = annotated(kwonlyarg)
        if kwonlydefaults and kwonlyarg in kwonlydefaults:
            spec += formatvalue(kwonlydefaults[kwonlyarg])
        specs.append(spec)
    if varkw is not None:
        specs.append(formatvarkw(annotated(varkw)))
    result = '(' + ', '.join(specs) + ')'
    if 'return' in annotations:
        result += formatreturns(formatannotation(annotations['return']))
    return result
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value)):
    """Format an argument spec from the 4 values returned by getargvalues.
    The first four arguments are (args, varargs, varkw, locals).  The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings."""
    def render(name):
        # One "name=value" entry, pulled from the frame's locals.
        return formatarg(name) + formatvalue(locals[name])
    specs = [render(name) for name in args]
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + ', '.join(specs) + ')'
def _missing_arguments(f_name, argnames, pos, values):
names = [repr(name) for name in argnames if name not in values]
missing = len(names)
if missing == 1:
s = names[0]
elif missing == 2:
s = "{} and {}".format(*names)
else:
tail = ", {} and {}".format(*names[-2:])
del names[-2:]
s = ", ".join(names) + tail
raise TypeError("%s() missing %i required %s argument%s: %s" %
(f_name, missing,
"positional" if pos else "keyword-only",
"" if missing == 1 else "s", s))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
atleast = len(args) - defcount
kwonly_given = len([arg for arg in kwonly if arg in values])
if varargs:
plural = atleast != 1
sig = "at least %d" % (atleast,)
elif defcount:
plural = True
sig = "from %d to %d" % (atleast, len(args))
else:
plural = len(args) != 1
sig = str(len(args))
kwonly_sig = ""
if kwonly_given:
msg = " positional argument%s (and %d keyword-only argument%s)"
kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
"s" if kwonly_given != 1 else ""))
raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
(f_name, sig, "s" if plural else "", given, kwonly_sig,
"was" if given == 1 and not kwonly_given else "were"))
def getcallargs(*func_and_positional, **named):
    """Get the mapping of arguments to values.
    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'."""
    # The callable is smuggled in as the first positional so that this
    # function's own parameter names cannot collide with the callee's.
    func = func_and_positional[0]
    positional = func_and_positional[1:]
    spec = getfullargspec(func)
    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
    f_name = func.__name__
    arg2value = {}
    if ismethod(func) and func.__self__ is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.__self__,) + positional
    num_pos = len(positional)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    # Bind as many positionals as there are named parameters; any overflow
    # goes into *varargs below.
    n = min(num_pos, num_args)
    for i in range(n):
        arg2value[args[i]] = positional[i]
    if varargs:
        arg2value[varargs] = tuple(positional[n:])
    possible_kwargs = set(args + kwonlyargs)
    if varkw:
        arg2value[varkw] = {}
    for kw, value in named.items():
        if kw not in possible_kwargs:
            if not varkw:
                raise TypeError("%s() got an unexpected keyword argument %r" %
                                (f_name, kw))
            arg2value[varkw][kw] = value
            continue
        if kw in arg2value:
            raise TypeError("%s() got multiple values for argument %r" %
                            (f_name, kw))
        arg2value[kw] = value
    if num_pos > num_args and not varargs:
        _too_many(f_name, args, kwonlyargs, varargs, num_defaults,
                  num_pos, arg2value)
    if num_pos < num_args:
        # Parameters not filled positionally must come from keywords or
        # fall back to their declared defaults.
        req = args[:num_args - num_defaults]
        for arg in req:
            if arg not in arg2value:
                _missing_arguments(f_name, req, True, arg2value)
        for i, arg in enumerate(args[num_args - num_defaults:]):
            if arg not in arg2value:
                arg2value[arg] = defaults[i]
    # Keyword-only parameters: apply defaults, then report anything missing.
    missing = 0
    for kwarg in kwonlyargs:
        if kwarg not in arg2value:
            if kwonlydefaults and kwarg in kwonlydefaults:
                arg2value[kwarg] = kwonlydefaults[kwarg]
            else:
                missing += 1
    if missing:
        _missing_arguments(f_name, kwonlyargs, False, arg2value)
    return arg2value
# Result type of getclosurevars(): name->value maps for nonlocal, global and
# builtin references, plus the set of names that could not be resolved.
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')
def getclosurevars(func):
    """
    Get the mapping of free variables to their current values.
    Returns a named tuple of dicts mapping the current nonlocal, global
    and builtin references as seen by the body of the function. A final
    set of unbound names that could not be resolved is also provided.
    """
    if ismethod(func):
        # Inspect the underlying function of a bound method.
        func = func.__func__
    if not isfunction(func):
        raise TypeError("'{!r}' is not a Python function".format(func))
    code = func.__code__
    # Nonlocal references are named in co_freevars and resolved
    # by looking them up in __closure__ by positional index
    if func.__closure__ is None:
        nonlocal_vars = {}
    else:
        nonlocal_vars = {
            var : cell.cell_contents
            for var, cell in zip(code.co_freevars, func.__closure__)
        }
    # Global and builtin references are named in co_names and resolved
    # by looking them up in __globals__ or __builtins__
    global_ns = func.__globals__
    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
    if ismodule(builtin_ns):
        # __builtins__ may be either the module or its dict.
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in code.co_names:
        if name in ("None", "True", "False"):
            # Because these used to be builtins instead of keywords, they
            # may still show up as name references. We ignore them.
            continue
        try:
            global_vars[name] = global_ns[name]
        except KeyError:
            try:
                builtin_vars[name] = builtin_ns[name]
            except KeyError:
                unbound_names.add(name)
    return ClosureVars(nonlocal_vars, global_vars,
                       builtin_vars, unbound_names)
# -------------------------------------------------- stack frame extraction
# Result type of getframeinfo(): code_context is a list of source lines (or
# None) and index is the current line's position within that list.
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.
    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        # Accept a traceback too; unwrap it to its frame.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line (0-based below).
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except OSError:
            lines = index = None
        else:
            # Clamp the window to the bounds of the file.
            # NOTE(review): max(start, 1) prevents the window from starting
            # at the very first line -- presumably intentional; confirm.
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # FrameType.f_lineno is now a descriptor that grovels co_lnotab
    # NOTE(review): kept as a function, presumably for API compatibility.
    return frame.f_lineno
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.
    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = frame
    # Follow f_back links outward until the top of the stack (None).
    while current is not None:
        records.append((current,) + getframeinfo(current, context))
        current = current.f_back
    return records
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.
    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = tb
    # Follow tb_next links inward until the deepest frame (None).
    while current is not None:
        records.append((current.tb_frame,) + getframeinfo(current, context))
        current = current.tb_next
    return records
def currentframe():
    """Return the frame of the caller or None if this is not possible."""
    getframe = getattr(sys, '_getframe', None)
    if getframe is None:
        # Implementations without frame introspection (no sys._getframe).
        return None
    # Depth 1 == the caller of currentframe().
    return getframe(1)
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    # sys._getframe(1) is the caller's frame, so the caller is included.
    return getouterframes(sys._getframe(1), context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    # Walks the traceback of the exception currently being handled.
    return getinnerframes(sys.exc_info()[2], context)
# ------------------------------------------------ static version of getattr
# Unique marker distinguishing "attribute absent" from any real value
# (including None) in the getattr_static() helpers below.
_sentinel = object()
def _static_getmro(klass):
    """Return klass.__mro__ via the type slot, bypassing any metaclass
    __getattribute__ override (raises TypeError for non-types)."""
    return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
    """Look up *attr* in the instance __dict__ without invoking descriptors.
    Returns _sentinel when the instance has no __dict__ or no such key."""
    try:
        instance_dict = object.__getattribute__(obj, "__dict__")
    except AttributeError:
        instance_dict = {}
    # dict.get is called unbound so a dict subclass's get() can't interfere.
    return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
    """Search the static MRO of *klass* for *attr*; _sentinel if absent."""
    for entry in _static_getmro(klass):
        if _shadowed_dict(type(entry)) is not _sentinel:
            # This entry's __dict__ is shadowed; its contents can't be
            # trusted, so skip it entirely.
            continue
        try:
            return entry.__dict__[attr]
        except KeyError:
            pass
    return _sentinel
def _is_type(obj):
    """Return True when *obj* is a type, judged statically via its MRO slot."""
    try:
        _static_getmro(obj)
    except TypeError:
        # The __mro__ slot only applies to type instances.
        return False
    else:
        return True
def _shadowed_dict(klass):
    """Return whatever shadows '__dict__' on *klass*'s MRO, if anything.
    Returns _sentinel when every '__dict__' entry found is the genuine
    getset slot descriptor (i.e. __dict__ is not shadowed)."""
    dict_attr = type.__dict__["__dict__"]
    for entry in _static_getmro(klass):
        try:
            class_dict = dict_attr.__get__(entry)["__dict__"]
        except KeyError:
            pass
        else:
            if not (type(class_dict) is types.GetSetDescriptorType and
                    class_dict.__name__ == "__dict__" and
                    class_dict.__objclass__ is entry):
                # Something other than the real slot descriptor is
                # masquerading as __dict__ on this entry.
                return class_dict
    return _sentinel
def getattr_static(obj, attr, default=_sentinel):
    """Retrieve attributes without triggering dynamic lookup via the
    descriptor protocol, __getattr__ or __getattribute__.
    Note: this function may not be able to retrieve all attributes
    that getattr can fetch (like dynamically created attributes)
    and may find attributes that getattr can't (like descriptors
    that raise AttributeError). It can also return descriptor objects
    instead of instance members in some cases. See the
    documentation for details.
    """
    instance_result = _sentinel
    if not _is_type(obj):
        klass = type(obj)
        dict_attr = _shadowed_dict(klass)
        # Only trust the instance __dict__ when the class doesn't shadow it
        # with anything other than the real member descriptor.
        if (dict_attr is _sentinel or
            type(dict_attr) is types.MemberDescriptorType):
            instance_result = _check_instance(obj, attr)
    else:
        klass = obj
    klass_result = _check_class(klass, attr)
    if instance_result is not _sentinel and klass_result is not _sentinel:
        # Data descriptors (defining both __get__ and __set__) on the class
        # take precedence over the instance __dict__.
        if (_check_class(type(klass_result), '__get__') is not _sentinel and
            _check_class(type(klass_result), '__set__') is not _sentinel):
            return klass_result
    if instance_result is not _sentinel:
        return instance_result
    if klass_result is not _sentinel:
        return klass_result
    if obj is klass:
        # for types we check the metaclass too
        for entry in _static_getmro(type(klass)):
            if _shadowed_dict(type(entry)) is _sentinel:
                try:
                    return entry.__dict__[attr]
                except KeyError:
                    pass
    if default is not _sentinel:
        return default
    raise AttributeError(attr)
# ------------------------------------------------ generator introspection
# The four possible results of getgeneratorstate().
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'
def getgeneratorstate(generator):
    """Get current state of a generator-iterator.
    Possible states are:
      GEN_CREATED: Waiting to start execution.
      GEN_RUNNING: Currently being executed by the interpreter.
      GEN_SUSPENDED: Currently suspended at a yield expression.
      GEN_CLOSED: Execution has completed.
    """
    if generator.gi_running:
        return GEN_RUNNING
    frame = generator.gi_frame
    if frame is None:
        return GEN_CLOSED
    # A frame that has not started executing has f_lasti == -1.
    return GEN_CREATED if frame.f_lasti == -1 else GEN_SUSPENDED
def getgeneratorlocals(generator):
    """
    Get the mapping of generator local variables to their current values.
    A dict is returned, with the keys the local variable names and values the
    bound values.

    Raises TypeError if *generator* is not a Python generator.
    """
    if not isgenerator(generator):
        raise TypeError("'{!r}' is not a Python generator".format(generator))
    frame = getattr(generator, "gi_frame", None)
    if frame is not None:
        # Use the frame we already fetched; re-reading generator.gi_frame
        # could yield None if the generator finished in the meantime.
        return frame.f_locals
    return {}
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
# Types of C-level callables that cannot carry a user-defined signature:
# slot wrappers, method wrappers, classmethod descriptors and builtins.
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_ClassMethodWrapper = type(int.__dict__['from_bytes'])
_NonUserDefinedCallables = (_WrapperDescriptor,
                            _MethodWrapper,
                            _ClassMethodWrapper,
                            types.BuiltinFunctionType)
def _signature_get_user_defined_method(cls, method_name):
    """Return cls.<method_name> if it is a user-defined callable, else None.
    C-level callables are rejected until '__signature__' support is added
    to them."""
    try:
        meth = getattr(cls, method_name)
    except AttributeError:
        return None
    if isinstance(meth, _NonUserDefinedCallables):
        # Once '__signature__' will be added to 'C'-level
        # callables, this check won't be necessary
        return None
    return meth
def _signature_get_partial(wrapped_sig, partial, extra_args=()):
    # Internal helper to calculate how 'wrapped_sig' signature will
    # look like after applying a 'functools.partial' object (or alike)
    # on it.
    old_params = wrapped_sig.parameters
    new_params = OrderedDict(old_params.items())
    partial_args = partial.args or ()
    partial_keywords = partial.keywords or {}
    if extra_args:
        partial_args = extra_args + partial_args
    try:
        # Let the Signature machinery validate the partial's stored args.
        ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)
    except TypeError as ex:
        msg = 'partial object {!r} has incorrect arguments'.format(partial)
        raise ValueError(msg) from ex
    transform_to_kwonly = False
    for param_name, param in old_params.items():
        try:
            arg_value = ba.arguments[param_name]
        except KeyError:
            pass
        else:
            if param.kind is _POSITIONAL_ONLY:
                # If positional-only parameter is bound by partial,
                # it effectively disappears from the signature
                new_params.pop(param_name)
                continue
            if param.kind is _POSITIONAL_OR_KEYWORD:
                if param_name in partial_keywords:
                    # This means that this parameter, and all parameters
                    # after it should be keyword-only (and var-positional
                    # should be removed). Here's why. Consider the following
                    # function:
                    #     foo(a, b, *args, c):
                    #         pass
                    #
                    # "partial(foo, a='spam')" will have the following
                    # signature: "(*, a='spam', b, c)". Because attempting
                    # to call that partial with "(10, 20)" arguments will
                    # raise a TypeError, saying that "a" argument received
                    # multiple values.
                    transform_to_kwonly = True
                    # Set the new default value
                    new_params[param_name] = param.replace(default=arg_value)
                else:
                    # was passed as a positional argument
                    new_params.pop(param.name)
                    continue
            if param.kind is _KEYWORD_ONLY:
                # Set the new default value
                new_params[param_name] = param.replace(default=arg_value)
        if transform_to_kwonly:
            assert param.kind is not _POSITIONAL_ONLY
            if param.kind is _POSITIONAL_OR_KEYWORD:
                # Demote this parameter to keyword-only and move it to the
                # end so ordering constraints stay valid.
                new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)
                new_params[param_name] = new_param
                new_params.move_to_end(param_name)
            elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):
                new_params.move_to_end(param_name)
            elif param.kind is _VAR_POSITIONAL:
                # *args cannot follow keyword-only parameters; drop it.
                new_params.pop(param.name)
    return wrapped_sig.replace(parameters=new_params.values())
def _signature_bound_method(sig):
    """Transform the signature of an unbound function into that of a bound
    method by dropping the first (self/cls) parameter."""
    params = tuple(sig.parameters.values())
    if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
        # Nothing that could serve as a 'self' slot.
        raise ValueError('invalid method signature')
    kind = params[0].kind
    if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
        # Drop first parameter:
        # '(p1, p2[, ...])' -> '(p2[, ...])'
        params = params[1:]
    elif kind is not _VAR_POSITIONAL:
        # Unless we add a new parameter type we never get here.
        raise ValueError('invalid argument type')
    # A leading *args absorbs 'self', so it is left untouched:
    # '(*args[, ...])' -> '(*args[, ...])'
    return sig.replace(parameters=params)
def _signature_is_builtin(obj):
    """Test whether *obj* is a callable that might support Argument
    Clinic's __text_signature__ protocol."""
    if isbuiltin(obj) or ismethoddescriptor(obj):
        return True
    if isinstance(obj, _NonUserDefinedCallables):
        return True
    # Can't test 'isinstance(type)' here, as it would
    # also be True for regular python classes
    return obj in (type, object)
def _signature_is_functionlike(obj):
    # Internal helper to test if `obj` is a duck type of FunctionType.
    # A good example of such objects are functions compiled with
    # Cython, which have all attributes that a pure Python function
    # would have, but have their code statically compiled.
    if not callable(obj) or isclass(obj):
        # All function-like objects are obviously callables,
        # and not classes.
        return False
    name = getattr(obj, '__name__', None)
    code = getattr(obj, '__code__', None)
    # _void distinguishes "attribute missing" from a legitimate None value.
    defaults = getattr(obj, '__defaults__', _void) # Important to use _void ...
    kwdefaults = getattr(obj, '__kwdefaults__', _void) # ... and not None here
    annotations = getattr(obj, '__annotations__', None)
    return (isinstance(code, types.CodeType) and
            isinstance(name, str) and
            (defaults is None or isinstance(defaults, tuple)) and
            (kwdefaults is None or isinstance(kwdefaults, dict)) and
            isinstance(annotations, dict))
def _signature_get_bound_param(spec):
# Internal helper to get first parameter name from a
# __text_signature__ of a builtin method, which should
# be in the following format: '($param1, ...)'.
# Assumptions are that the first argument won't have
# a default value or an annotation.
assert spec.startswith('($')
pos = spec.find(',')
if pos == -1:
pos = spec.find(')')
cpos = spec.find(':')
assert cpos == -1 or cpos > pos
cpos = spec.find('=')
assert cpos == -1 or cpos > pos
return spec[2:pos]
def _signature_strip_non_python_syntax(signature):
    """
    Takes a signature in Argument Clinic's extended signature format.
    Returns a tuple of three things:
      * that signature re-rendered in standard Python syntax,
      * the index of the "self" parameter (generally 0), or None if
        the function does not have a "self" parameter, and
      * the index of the last "positional only" parameter,
        or None if the signature has no positional-only parameters.
    """
    if not signature:
        return signature, None, None
    self_parameter = None
    last_positional_only = None
    lines = [l.encode('ascii') for l in signature.split('\n')]
    generator = iter(lines).__next__
    token_stream = tokenize.tokenize(generator)
    # Commas are emitted lazily (delayed_comma) so that a trailing comma
    # before ')' can be dropped; skip_next_comma swallows the comma that
    # follows a '/' marker.
    delayed_comma = False
    skip_next_comma = False
    text = []
    add = text.append
    current_parameter = 0
    OP = token.OP
    ERRORTOKEN = token.ERRORTOKEN
    # token stream always starts with ENCODING token, skip it
    t = next(token_stream)
    assert t.type == tokenize.ENCODING
    for t in token_stream:
        type, string = t.type, t.string
        if type == OP:
            if string == ',':
                if skip_next_comma:
                    skip_next_comma = False
                else:
                    assert not delayed_comma
                    delayed_comma = True
                    current_parameter += 1
                continue
            if string == '/':
                # '/' marks the end of the positional-only parameters.
                assert not skip_next_comma
                assert last_positional_only is None
                skip_next_comma = True
                last_positional_only = current_parameter - 1
                continue
        if (type == ERRORTOKEN) and (string == '$'):
            # '$' marks the implicit "self" parameter.
            assert self_parameter is None
            self_parameter = current_parameter
            continue
        if delayed_comma:
            delayed_comma = False
            if not ((type == OP) and (string == ')')):
                add(', ')
        add(string)
        if (string == ','):
            add(' ')
    clean_signature = ''.join(text)
    return clean_signature, self_parameter, last_positional_only
def _signature_fromstr(cls, obj, s, skip_bound_arg=True):
    """Construct a signature (an instance of *cls*) for *obj* by parsing
    the text signature string *s* (content of '__text_signature__').

    Raises ValueError when *s* cannot be parsed into a valid signature.
    """
    Parameter = cls._parameter_cls

    clean_signature, self_parameter, last_positional_only = \
        _signature_strip_non_python_syntax(s)

    # Wrap the cleaned signature in a dummy 'def' so the ast module can
    # parse it for us.
    program = "def foo" + clean_signature + ": pass"

    try:
        module = ast.parse(program)
    except SyntaxError:
        module = None

    if not isinstance(module, ast.Module):
        raise ValueError("{!r} builtin has invalid signature".format(obj))

    f = module.body[0]

    parameters = []
    empty = Parameter.empty
    invalid = object()

    # Default values in a text signature may reference names from the
    # object's own module; build the namespaces used to resolve them.
    module = None
    module_dict = {}
    module_name = getattr(obj, '__module__', None)
    if module_name:
        module = sys.modules.get(module_name, None)
        if module:
            module_dict = module.__dict__
    sys_module_dict = sys.modules

    def parse_name(node):
        # Extract a parameter name from an ast.arg node.
        assert isinstance(node, ast.arg)
        # BUGFIX(idiom): compare to None with 'is not', not '!='.
        if node.annotation is not None:
            raise ValueError("Annotations are not currently supported")
        return node.arg

    def wrap_value(s):
        # Evaluate the default-value expression in the module (then
        # sys.modules) namespace, and re-wrap the resulting constant as
        # an AST literal node so ast.literal_eval can consume it later.
        try:
            value = eval(s, module_dict)
        except NameError:
            try:
                value = eval(s, sys_module_dict)
            except NameError:
                raise RuntimeError()

        if isinstance(value, str):
            return ast.Str(value)
        if isinstance(value, (int, float)):
            return ast.Num(value)
        if isinstance(value, bytes):
            return ast.Bytes(value)
        if value in (True, False, None):
            return ast.NameConstant(value)
        raise RuntimeError()

    class RewriteSymbolics(ast.NodeTransformer):
        # Replace dotted attribute chains and bare names in default
        # values with literal AST nodes holding their evaluated value.
        def visit_Attribute(self, node):
            a = []
            n = node
            while isinstance(n, ast.Attribute):
                a.append(n.attr)
                n = n.value
            if not isinstance(n, ast.Name):
                raise RuntimeError()
            a.append(n.id)
            value = ".".join(reversed(a))
            return wrap_value(value)

        def visit_Name(self, node):
            if not isinstance(node.ctx, ast.Load):
                raise ValueError()
            return wrap_value(node.id)

    def p(name_node, default_node, default=empty):
        # Append one Parameter built from the AST nodes; 'kind' is read
        # from the enclosing scope, which updates it between phases.
        name = parse_name(name_node)
        if name is invalid:
            return None
        if default_node and default_node is not _empty:
            try:
                default_node = RewriteSymbolics().visit(default_node)
                o = ast.literal_eval(default_node)
            except ValueError:
                o = invalid
            if o is invalid:
                return None
            default = o if o is not invalid else default
        parameters.append(Parameter(name, kind, default=default, annotation=empty))

    # non-keyword-only parameters
    args = reversed(f.args.args)
    defaults = reversed(f.args.defaults)
    # Pair args with defaults from the right (defaults apply to the
    # trailing parameters).  Renamed from 'iter' to avoid shadowing the
    # builtin.
    arg_pairs = itertools.zip_longest(args, defaults, fillvalue=None)
    if last_positional_only is not None:
        kind = Parameter.POSITIONAL_ONLY
    else:
        kind = Parameter.POSITIONAL_OR_KEYWORD
    for i, (name, default) in enumerate(reversed(list(arg_pairs))):
        p(name, default)
        if i == last_positional_only:
            kind = Parameter.POSITIONAL_OR_KEYWORD

    # *args
    if f.args.vararg:
        kind = Parameter.VAR_POSITIONAL
        p(f.args.vararg, empty)

    # keyword-only arguments
    kind = Parameter.KEYWORD_ONLY
    for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults):
        p(name, default)

    # **kwargs
    if f.args.kwarg:
        kind = Parameter.VAR_KEYWORD
        p(f.args.kwarg, empty)

    if self_parameter is not None:
        # Possibly strip the bound argument:
        #    - We *always* strip first bound argument if
        #      it is a module.
        #    - We don't strip first bound argument if
        #      skip_bound_arg is False.
        assert parameters
        _self = getattr(obj, '__self__', None)
        self_isbound = _self is not None
        self_ismodule = ismodule(_self)
        if self_isbound and (self_ismodule or skip_bound_arg):
            parameters.pop(0)
        else:
            # for builtins, self parameter is always positional-only!
            p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY)
            parameters[0] = p

    return cls(parameters, return_annotation=cls.empty)
def _signature_from_builtin(cls, func, skip_bound_arg=True):
    """Build a signature object (of class *cls*) for a builtin callable.

    Raises TypeError for non-builtins and ValueError when the builtin
    exposes no text signature.
    """
    if not _signature_is_builtin(func):
        raise TypeError(
            "{!r} is not a Python builtin function".format(func))

    text_sig = getattr(func, "__text_signature__", None)
    if not text_sig:
        raise ValueError(
            "no signature found for builtin {!r}".format(func))

    return _signature_fromstr(cls, func, text_sig, skip_bound_arg)
def _signature_internal(obj, follow_wrapper_chains=True, skip_bound_arg=True):
    # Core signature-resolution dispatcher.  Tries, in order: bound
    # methods, decorator unwrapping, an explicit __signature__,
    # partialmethod/partial objects, plain Python functions, builtins,
    # and finally the class/instance __call__ protocols.
    if not callable(obj):
        raise TypeError('{!r} is not a callable object'.format(obj))
    if isinstance(obj, types.MethodType):
        # In this case we skip the first parameter of the underlying
        # function (usually `self` or `cls`).
        sig = _signature_internal(obj.__func__,
                                  follow_wrapper_chains,
                                  skip_bound_arg)
        if skip_bound_arg:
            return _signature_bound_method(sig)
        else:
            return sig
    # Was this function wrapped by a decorator?
    if follow_wrapper_chains:
        # Stop unwrapping as soon as an explicit __signature__ shows up.
        obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")))
    try:
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            return sig
    try:
        partialmethod = obj._partialmethod
    except AttributeError:
        pass
    else:
        if isinstance(partialmethod, functools.partialmethod):
            # Unbound partialmethod (see functools.partialmethod)
            # This means, that we need to calculate the signature
            # as if it's a regular partial object, but taking into
            # account that the first positional argument
            # (usually `self`, or `cls`) will not be passed
            # automatically (as for boundmethods)
            wrapped_sig = _signature_internal(partialmethod.func,
                                              follow_wrapper_chains,
                                              skip_bound_arg)
            sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
            # Re-attach the implicit first parameter stripped above.
            first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
            new_params = (first_wrapped_param,) + tuple(sig.parameters.values())
            return sig.replace(parameters=new_params)
    if isfunction(obj) or _signature_is_functionlike(obj):
        # If it's a pure Python function, or an object that is duck type
        # of a Python function (Cython functions, for instance), then:
        return Signature.from_function(obj)
    if _signature_is_builtin(obj):
        return _signature_from_builtin(Signature, obj,
                                       skip_bound_arg=skip_bound_arg)
    if isinstance(obj, functools.partial):
        wrapped_sig = _signature_internal(obj.func,
                                          follow_wrapper_chains,
                                          skip_bound_arg)
        return _signature_get_partial(wrapped_sig, obj)
    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass
        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _signature_get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = _signature_internal(call,
                                      follow_wrapper_chains,
                                      skip_bound_arg)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _signature_get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = _signature_internal(new,
                                          follow_wrapper_chains,
                                          skip_bound_arg)
            else:
                # Finally, we should have at least __init__ implemented
                init = _signature_get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = _signature_internal(init,
                                              follow_wrapper_chains,
                                              skip_bound_arg)
        if sig is None:
            # At this point we know, that `obj` is a class, with no user-
            # defined '__init__', '__new__', or class-level '__call__'
            for base in obj.__mro__[:-1]:
                # Since '__text_signature__' is implemented as a
                # descriptor that extracts text signature from the
                # class docstring, if 'obj' is derived from a builtin
                # class, its own '__text_signature__' may be 'None'.
                # Therefore, we go through the MRO (except the last
                # class in there, which is 'object') to find the first
                # class with non-empty text signature.
                try:
                    text_sig = base.__text_signature__
                except AttributeError:
                    pass
                else:
                    if text_sig:
                        # If 'obj' class has a __text_signature__ attribute:
                        # return a signature based on it
                        return _signature_fromstr(Signature, obj, text_sig)
            # No '__text_signature__' was found for the 'obj' class.
            # Last option is to check if its '__init__' is
            # object.__init__ or type.__init__.
            if type not in obj.__mro__:
                # We have a class (not metaclass), but no user-defined
                # __init__ or __new__ for it
                if obj.__init__ is object.__init__:
                    # Return a signature of 'object' builtin.
                    return signature(object)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _signature_get_user_defined_method(type(obj), '__call__')
        if call is not None:
            try:
                sig = _signature_internal(call,
                                          follow_wrapper_chains,
                                          skip_bound_arg)
            except ValueError as ex:
                msg = 'no signature found for {!r}'.format(obj)
                raise ValueError(msg) from ex
    if sig is not None:
        # For classes and objects we skip the first parameter of their
        # __call__, __new__, or __init__ methods
        if skip_bound_arg:
            return _signature_bound_method(sig)
        else:
            return sig
    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {!r}'.format(obj)
        raise ValueError(msg)
    raise ValueError('callable {!r} is not supported by signature'.format(obj))
def signature(obj):
    '''Get a signature object for the passed callable.'''
    # Public entry point: delegates to the internal resolver with the
    # default options (follow wrapper chains, skip bound arguments).
    return _signature_internal(obj)
class _void:
    '''A private marker - used in Parameter & Signature'''
    # Sentinel distinct from None and _empty; the .replace() methods use
    # it to detect "argument not supplied".
class _empty:
    """Marker for "no value": missing default value or annotation."""
    pass
class _ParameterKind(int):
def __new__(self, *args, name):
obj = int.__new__(self, *args)
obj._name = name
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {!r}>'.format(self._name)
# The five parameter kinds.  Their integer values encode the canonical
# ordering that Signature.__init__ validates: positional-only first,
# **kwargs last.
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter:
    '''Represents a parameter in a function signature.
    Has the following public attributes:
    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified. If the
        parameter has no default value, this attribute is set to
        `Parameter.empty`.
    * annotation
        The annotation for the parameter if specified. If the
        parameter has no annotation, this attribute is set to
        `Parameter.empty`.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
    '''
    # Slots keep instances small; all state is behind read-only
    # properties below.
    __slots__ = ('_name', '_kind', '_default', '_annotation')
    # Re-export the module-level kind constants as class attributes.
    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD
    empty = _empty
    def __init__(self, name, kind, *, default=_empty, annotation=_empty):
        # Validate kind first, then default (VAR_* may not carry one),
        # then the name itself (required, must be an identifier).
        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind
        if default is not _empty:
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation
        if name is _empty:
            raise ValueError('name is a required attribute for Parameter')
        if not isinstance(name, str):
            raise TypeError("name must be a str, not a {!r}".format(name))
        if not name.isidentifier():
            raise ValueError('{!r} is not a valid parameter name'.format(name))
        self._name = name
    @property
    def name(self):
        return self._name
    @property
    def default(self):
        return self._default
    @property
    def annotation(self):
        return self._annotation
    @property
    def kind(self):
        return self._kind
    def replace(self, *, name=_void, kind=_void,
                annotation=_void, default=_void):
        '''Creates a customized copy of the Parameter.'''
        # _void (not None/_empty) marks "keep the current value", since
        # both None and _empty are legitimate new values.
        if name is _void:
            name = self._name
        if kind is _void:
            kind = self._kind
        if annotation is _void:
            annotation = self._annotation
        if default is _void:
            default = self._default
        return type(self)(name, kind, default=default, annotation=annotation)
    def __str__(self):
        # Render as it would appear in a def statement, e.g.
        # "x:int=3", "*args", "**kwargs".
        kind = self.kind
        formatted = self._name
        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{}:{}'.format(formatted,
                                       formatannotation(self._annotation))
        if self._default is not _empty:
            formatted = '{}={}'.format(formatted, repr(self._default))
        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted
        return formatted
    def __repr__(self):
        return '<{} at {:#x} {!r}>'.format(self.__class__.__name__,
                                           id(self), self.name)
    def __eq__(self, other):
        # Structural equality over all four attributes; non-Parameter
        # operands simply compare unequal.
        return (issubclass(other.__class__, Parameter) and
                self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)
    def __ne__(self, other):
        return not self.__eq__(other)
class BoundArguments:
    '''Result of `Signature.bind` call. Holds the mapping of arguments
    to the function's parameters.
    Has the following public attributes:
    * arguments : OrderedDict
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    * signature : Signature
        The Signature object that created this instance.
    * args : tuple
        Tuple of positional arguments values.
    * kwargs : dict
        Dict of keyword arguments values.
    '''
    def __init__(self, signature, arguments):
        # 'arguments' maps parameter name -> bound value; defaults are
        # deliberately absent.
        self.arguments = arguments
        self._signature = signature
    @property
    def signature(self):
        return self._signature
    @property
    def args(self):
        # Rebuild the positional-argument tuple by walking parameters in
        # declaration order until a keyword-only region or a gap appears.
        args = []
        for param_name, param in self._signature.parameters.items():
            if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                break
            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here. Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)
        return tuple(args)
    @property
    def kwargs(self):
        # Everything after the positional region (first keyword-only
        # parameter, or first unbound positional) goes into kwargs.
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                    kwargs_started = True
                else:
                    if param_name not in self.arguments:
                        kwargs_started = True
                        continue
            if not kwargs_started:
                continue
            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg
        return kwargs
    def __eq__(self, other):
        # Equal when both the signature and the bound values match.
        return (issubclass(other.__class__, BoundArguments) and
                self.signature == other.signature and
                self.arguments == other.arguments)
    def __ne__(self, other):
        return not self.__eq__(other)
class Signature:
    '''A Signature object represents the overall signature of a function.
    It stores a Parameter object for each parameter accepted by the
    function, as well as information specific to the function itself.
    A Signature object has the following public attributes and methods:
    * parameters : OrderedDict
        An ordered mapping of parameters' names to the corresponding
        Parameter objects (keyword-only arguments are in the same order
        as listed in `code.co_varnames`).
    * return_annotation : object
        The annotation for the return type of the function if specified.
        If the function has no annotation for its return type, this
        attribute is set to `Signature.empty`.
    * bind(*args, **kwargs) -> BoundArguments
        Creates a mapping from positional and keyword arguments to
        parameters.
    * bind_partial(*args, **kwargs) -> BoundArguments
        Creates a partial mapping from positional and keyword arguments
        to parameters (simulating 'functools.partial' behavior.)
    '''
    # The parameter mapping is exposed through a read-only
    # MappingProxyType view (see __init__).
    __slots__ = ('_return_annotation', '_parameters')
    _parameter_cls = Parameter
    _bound_arguments_cls = BoundArguments
    empty = _empty
    def __init__(self, parameters=None, *, return_annotation=_empty,
                 __validate_parameters__=True):
        '''Constructs Signature from the given list of Parameter
        objects and 'return_annotation'. All arguments are optional.
        '''
        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                params = OrderedDict()
                # Parameters must be in non-decreasing kind order
                # (positional-only < positional-or-keyword < *args <
                # keyword-only < **kwargs); _ParameterKind is an int
                # subclass, so '<'/'>' compare kinds directly.
                top_kind = _POSITIONAL_ONLY
                kind_defaults = False
                for idx, param in enumerate(parameters):
                    kind = param.kind
                    name = param.name
                    if kind < top_kind:
                        msg = 'wrong parameter order: {!r} before {!r}'
                        msg = msg.format(top_kind, kind)
                        raise ValueError(msg)
                    elif kind > top_kind:
                        kind_defaults = False
                        top_kind = kind
                    if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):
                        if param.default is _empty:
                            if kind_defaults:
                                # No default for this parameter, but the
                                # previous parameter of the same kind had
                                # a default
                                msg = 'non-default argument follows default ' \
                                      'argument'
                                raise ValueError(msg)
                        else:
                            # There is a default for this parameter.
                            kind_defaults = True
                    if name in params:
                        msg = 'duplicate parameter name: {!r}'.format(name)
                        raise ValueError(msg)
                    params[name] = param
            else:
                params = OrderedDict(((param.name, param)
                                      for param in parameters))
        self._parameters = types.MappingProxyType(params)
        self._return_annotation = return_annotation
    @classmethod
    def from_function(cls, func):
        '''Constructs Signature for the given python function'''
        is_duck_function = False
        if not isfunction(func):
            if _signature_is_functionlike(func):
                is_duck_function = True
            else:
                # If it's not a pure Python function, and not a duck type
                # of pure function:
                raise TypeError('{!r} is not a Python function'.format(func))
        Parameter = cls._parameter_cls
        # Parameter information.
        func_code = func.__code__
        pos_count = func_code.co_argcount
        arg_names = func_code.co_varnames
        positional = tuple(arg_names[:pos_count])
        keyword_only_count = func_code.co_kwonlyargcount
        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
        annotations = func.__annotations__
        defaults = func.__defaults__
        kwdefaults = func.__kwdefaults__
        if defaults:
            pos_default_count = len(defaults)
        else:
            pos_default_count = 0
        parameters = []
        # Non-keyword-only parameters w/o defaults.
        non_default_count = pos_count - pos_default_count
        for name in positional[:non_default_count]:
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD))
        # ... w/ defaults.
        for offset, name in enumerate(positional[non_default_count:]):
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD,
                                        default=defaults[offset]))
        # *args
        if func_code.co_flags & CO_VARARGS:
            name = arg_names[pos_count + keyword_only_count]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_POSITIONAL))
        # Keyword-only parameters.
        for name in keyword_only:
            default = _empty
            if kwdefaults is not None:
                default = kwdefaults.get(name, _empty)
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_KEYWORD_ONLY,
                                        default=default))
        # **kwargs
        if func_code.co_flags & CO_VARKEYWORDS:
            index = pos_count + keyword_only_count
            if func_code.co_flags & CO_VARARGS:
                index += 1
            name = arg_names[index]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_KEYWORD))
        # Is 'func' is a pure Python function - don't validate the
        # parameters list (for correct order and defaults), it should be OK.
        return cls(parameters,
                   return_annotation=annotations.get('return', _empty),
                   __validate_parameters__=is_duck_function)
    @classmethod
    def from_builtin(cls, func):
        # Alternate constructor for builtin callables with a
        # __text_signature__.
        return _signature_from_builtin(cls, func)
    @property
    def parameters(self):
        return self._parameters
    @property
    def return_annotation(self):
        return self._return_annotation
    def replace(self, *, parameters=_void, return_annotation=_void):
        '''Creates a customized copy of the Signature.
        Pass 'parameters' and/or 'return_annotation' arguments
        to override them in the new copy.
        '''
        if parameters is _void:
            parameters = self.parameters.values()
        if return_annotation is _void:
            return_annotation = self._return_annotation
        return type(self)(parameters,
                          return_annotation=return_annotation)
    def __eq__(self, other):
        # Two signatures are equal when all parameters match; the order
        # of keyword-only parameters is ignored, the order of the rest
        # is significant.
        if (not issubclass(type(other), Signature) or
                self.return_annotation != other.return_annotation or
                len(self.parameters) != len(other.parameters)):
            return False
        other_positions = {param: idx
                           for idx, param in enumerate(other.parameters.keys())}
        for idx, (param_name, param) in enumerate(self.parameters.items()):
            if param.kind == _KEYWORD_ONLY:
                try:
                    other_param = other.parameters[param_name]
                except KeyError:
                    return False
                else:
                    if param != other_param:
                        return False
            else:
                try:
                    other_idx = other_positions[param_name]
                except KeyError:
                    return False
                else:
                    if (idx != other_idx or
                                    param != other.parameters[param_name]):
                        return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def _bind(self, args, kwargs, *, partial=False):
        '''Private method. Don't use directly.'''
        # Phase 1 below consumes positional arguments against the
        # leading parameters; phase 2 maps the remaining parameters from
        # 'kwargs'.  'partial=True' relaxes the missing-argument checks
        # for bind_partial().
        arguments = OrderedDict()
        parameters = iter(self.parameters.values())
        parameters_ex = ()
        arg_vals = iter(args)
        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args. Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                                                param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        # No default, not VAR_KEYWORD, not VAR_POSITIONAL,
                        # not in `kwargs`
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = '{arg!r} parameter lacking default value'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments') from None
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError('too many positional arguments')
                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break
                    if param.name in kwargs:
                        raise TypeError('multiple values for argument '
                                        '{arg!r}'.format(arg=param.name))
                    arguments[param.name] = arg_val
        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue
            if param.kind == _VAR_POSITIONAL:
                # Named arguments don't refer to '*args'-like parameters.
                # We only arrive here if the positional arguments ended
                # before reaching the last parameter before *args.
                continue
            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter. It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                                                param.default is _empty):
                    raise TypeError('{arg!r} parameter lacking default value'. \
                                    format(arg=param_name)) from None
            else:
                if param.kind == _POSITIONAL_ONLY:
                    # This should never happen in case of a properly built
                    # Signature object (but let's have this check here
                    # to ensure correct behaviour just in case)
                    raise TypeError('{arg!r} parameter is positional only, '
                                    'but was passed as a keyword'. \
                                    format(arg=param.name))
                arguments[param_name] = arg_val
        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError('too many keyword arguments')
        return self._bound_arguments_cls(self, arguments)
    def bind(*args, **kwargs):
        '''Get a BoundArguments object, that maps the passed `args`
        and `kwargs` to the function's signature. Raises `TypeError`
        if the passed arguments can not be bound.
        '''
        # Declared without an explicit 'self' so that a keyword argument
        # literally named 'self' can still be bound via **kwargs.
        return args[0]._bind(args[1:], kwargs)
    def bind_partial(*args, **kwargs):
        '''Get a BoundArguments object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises `TypeError` if the passed arguments can not be bound.
        '''
        return args[0]._bind(args[1:], kwargs, partial=True)
    def __str__(self):
        # Render "(a, b=1, /, *args, c, **kw) -> anno", inserting the
        # '/' and '*' markers where the parameter kinds require them.
        result = []
        render_pos_only_separator = False
        render_kw_only_separator = True
        for param in self.parameters.values():
            formatted = str(param)
            kind = param.kind
            if kind == _POSITIONAL_ONLY:
                render_pos_only_separator = True
            elif render_pos_only_separator:
                # It's not a positional-only parameter, and the flag
                # is set to 'True' (there were pos-only params before.)
                result.append('/')
                render_pos_only_separator = False
            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False
            result.append(formatted)
        if render_pos_only_separator:
            # There were only positional-only parameters, hence the
            # flag was not reset to 'False'
            result.append('/')
        rendered = '({})'.format(', '.join(result))
        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {}'.format(anno)
        return rendered
def _main():
    """ Logic for inspecting an object given at command line """
    import argparse
    import importlib

    parser = argparse.ArgumentParser()
    parser.add_argument(
        'object',
        help="The object to be analysed. "
             "It supports the 'module:qualname' syntax")
    parser.add_argument(
        '-d', '--details', action='store_true',
        help='Display info about the module rather than its source code')

    args = parser.parse_args()

    target = args.object
    mod_name, has_attrs, attrs = target.partition(":")

    try:
        obj = module = importlib.import_module(mod_name)
    except Exception as exc:
        msg = "Failed to import {} ({}: {})".format(mod_name,
                                                    type(exc).__name__,
                                                    exc)
        print(msg, file=sys.stderr)
        # BUGFIX: use sys.exit() instead of the site-provided exit()
        # builtin, which is not guaranteed to exist (e.g. `python -S`).
        sys.exit(2)

    if has_attrs:
        # Walk the dotted attribute path after ':' to reach the target.
        parts = attrs.split(".")
        obj = module
        for part in parts:
            obj = getattr(obj, part)

    if module.__name__ in sys.builtin_module_names:
        print("Can't get info for builtin modules.", file=sys.stderr)
        sys.exit(1)

    if args.details:
        print('Target: {}'.format(target))
        print('Origin: {}'.format(getsourcefile(module)))
        print('Cached: {}'.format(module.__cached__))
        if obj is module:
            print('Loader: {}'.format(repr(module.__loader__)))
            if hasattr(module, '__path__'):
                print('Submodule search path: {}'.format(module.__path__))
        else:
            try:
                __, lineno = findsource(obj)
            except Exception:
                pass
            else:
                print('Line: {}'.format(lineno))
        print('\n')
    else:
        print(getsource(obj))
# Allow "python -m inspect <target>" style invocation.
if __name__ == "__main__":
    _main()
| PennartLoettring/Poettrix | rootfs/usr/lib/python3.4/inspect.py | Python | gpl-2.0 | 103,929 |
#!/sw/bin/python2.7
import sys
sys.path.append("..")
from ucnacore.PyxUtils import *
from math import *
from ucnacore.LinFitter import *
#from UCNAUtils import *
from bisect import bisect
from calib.FieldMapGen import *
def clip_function(y, rho, h, R):
    """Antiderivative used by survival_fraction when integrating the
    clipped area of a circular spot (radius rho) against height h and
    clip depth R, evaluated at height y.
    """
    chord = sqrt(rho**2 - y**2)
    # Guard the vertical tangent at |y| == rho with a tiny epsilon.
    if chord == 0:
        chord = 1e-10
    term_a = h*rho**2/R*atan(y/chord)
    term_b = 2*chord/(3*R)*(3*h*y/2 + rho**2 - y**2)
    return term_a + term_b
def survival_fraction(h,rho,R):
    # Fraction of a circular spot (radius rho) that survives clipping.
    # NOTE(review): geometry suggests h is the spot-center height above
    # the clipping edge and R the clip depth -- confirm against callers
    # (larmor_clipping_plot passes h in mm, R = 2*3.3 mm).
    d = R-h
    if d < -rho:
        # Spot entirely inside the surviving region.
        return 1
    if h <= -rho:
        # Spot entirely clipped away.
        return 0
    c1 = 0
    if d < rho:
        # Circular-segment area contribution where the clip boundary
        # cuts through the spot.
        sqd = sqrt(rho**2-d**2)
        c1 = pi/2*rho**2-d*sqd-rho**2*atan(d/sqd)
    # Normalize by the full spot area pi*rho**2.
    return ( c1 + clip_function(min(h,rho),rho,h,R)
             - clip_function(max(h-R,-rho),rho,h,R))/(pi*rho**2)
def radial_clip_function(r, rho, h, R):
    """Antiderivative for the radial clipping integral, evaluated at r.

    NOTE(review): *rho* is accepted only for call-signature symmetry
    with clip_function; it is not used here.
    """
    numerator = r**2*(3*h - 2*r)
    return numerator/(6*R**2)
def radial_survival_fraction(h,rho,R):
    # Radial analogue of survival_fraction: fraction of a spot of
    # radius rho surviving, with h measured radially.
    # NOTE(review): sign conventions differ from survival_fraction
    # (d = h-R here) -- verify against radial_clipping_plot usage.
    d = h-R
    if d > rho:
        return 1
    if h <= 0:
        return 0
    c1 = 0
    if d > 0:
        c1 = (h-R)**2
    # Normalize by rho**2 (no pi: both numerator terms carry the same
    # angular factor).
    return ( c1 + radial_clip_function(min(h,rho),rho,h,R) - radial_clip_function(max(d,0),rho,h,R) )/(rho**2)
class rot3:
    """Composite 3-D rotation (three axis rotations by angles t1, t2, t3)
    followed by a uniform scale s.

    Calling an instance with a 3-tuple (x, y, z) returns the transformed
    tuple.
    """
    def __init__(self, t1, t2, t3, s=1.0):
        # Precompute sines/cosines once; __call__ only multiplies.
        self.c1,self.s1 = cos(t1),sin(t1)
        self.c2,self.s2 = cos(t2),sin(t2)
        self.c3,self.s3 = cos(t3),sin(t3)
        self.s = s
    def __call__(self, xyz):
        # MODERNIZED: Python-2-only tuple-parameter unpacking
        # "def __call__(self,(x,y,z))" replaced by an explicit unpack,
        # so the class works under both Python 2 and 3.  Call sites are
        # unchanged (they already pass a single 3-tuple).
        x, y, z = xyz
        # Three successive plane rotations: (x,y), then (y,z), then (z,x).
        x,y = self.c1*x+self.s1*y,self.c1*y-self.s1*x
        y,z = self.c2*y+self.s2*z,self.c2*z-self.s2*y
        z,x = self.c3*z+self.s3*x,self.c3*x-self.s3*z
        return self.s*x,self.s*y,self.s*z
class path3d:
    """A polyline in 3-D, drawn in 2-D (x, y) while keeping z for
    depth-sorting crossings between curves (see interleave()).
    """
    def __init__(self):
        self.pts = []            # list of (x, y, z) points
        self.sty = []            # PyX stroke styles for this curve
        self.endsty = []         # extra styles for the final stroke (e.g. arrow)
        self.breakunder = False  # if set: gap width where this curve passes under
        self.nopatch = False     # if set: never draw over-crossing patches
    def addpt(self, xyz, s=1):
        # MODERNIZED: Python-2-only tuple parameter "(x,y,z)" replaced
        # by an explicit unpack; call sites are unchanged.
        x, y, z = xyz
        self.pts.append((x*s,y*s,z*s))
    def apply(self, transf):
        # Apply a point transform (e.g. a rot3 instance) to every point.
        self.pts = [transf(xyz) for xyz in self.pts]
    def finish(self):
        # Build the 2-D PyX path from the projected (x, y) coordinates.
        self.p = path.path()
        self.p.append(path.moveto(self.pts[0][0],self.pts[0][1]))
        for g in self.pts[1:]:
            self.p.append(path.lineto(g[0],g[1]))
        self.patchpts = []
        self.underpts = []
    def nearestpt(self, xy):
        # Index of the stored point closest (in 2-D) to xy.
        x, y = xy
        d0 = 1e20
        n = None
        for i in range(len(self.pts)):
            d1 = (self.pts[i][0]-x)**2+(self.pts[i][1]-y)**2
            if d1 < d0:
                d0 = d1
                n = i
        return n
    def znear(self, xy):
        # z (depth) of the stored point closest to xy.
        return self.pts[self.nearestpt(xy)][2]
    def znearc(self,c):
        # z near a PyX path parameter c (PyX units are converted to the
        # same scale as the stored points via the factor 100).
        x,y = self.p.at(c)
        x,y = 100*x.t,100*y.t
        return self.znear((x,y))
    def addPatch(self,c,z):
        # Record that this curve passes OVER another at path parameter c.
        self.patchpts.append((c,z))
    def drawto(self,cnvs):
        cnvs.stroke(self.p,self.sty)
def interleave(p3d1, p3d2):
    """Find 2-D crossings between two projected 3-D paths and record, on
    each, which curve passes over (patch) and which under at each one.

    MODERNIZED: Python-2-only print statements converted to the
    function-call form, which is valid in both Python 2 and 3.
    """
    print("Finding intersection points...")
    is1,is2 = p3d1.p.intersect(p3d2.p)
    print("determining patch z...")
    assert len(is1)==len(is2)
    for i in range(len(is1)):
        # Compare depths of the two curves at the crossing.
        z1 = p3d1.znearc(is1[i])
        z2 = p3d2.znearc(is2[i])
        if z1>z2:
            # p3d1 is in front: patch it; p3d2 passes under.
            p3d1.addPatch(is1[i],z1)
            p3d2.underpts.append(is2[i])
        else:
            p3d2.addPatch(is2[i],z2)
            p3d1.underpts.append(is1[i])
    print("done.")
def drawInterleaved(c, ps):
    """Draw the path3d curves in *ps* onto PyX canvas *c* with proper
    over/under crossings: gaps where a curve passes under (breakunder),
    and re-stroked patches where it passes over.

    MODERNIZED: Python-2-only print statements converted to the
    function-call form, which is valid in both Python 2 and 3.
    """
    print("Drawing base curves...")
    for p in ps:
        p.p = p.p.normpath()
        if p.breakunder:
            # Cut the curve open around each under-crossing.
            splits = []
            for s in p.underpts:
                splits += [s-p.breakunder*0.5,s+p.breakunder*0.5]
            psplit = p.p.split(splits)
            # Even-indexed segments are the visible pieces between gaps.
            for seg in psplit[0::2]:
                c.stroke(seg,p.sty)
        else:
            c.stroke(p.p,p.sty+p.endsty)
    print("Preparing patches...")
    patches = []
    for (pn,p) in enumerate(ps):
        if p.nopatch:
            continue
        p.patchpts.sort()
        splits = []
        for s in p.patchpts:
            splits += [s[0]-0.05,s[0]+0.05]
        psplit = p.p.split(splits)
        # Odd-indexed segments are the short pieces around each crossing;
        # sort by depth so nearer patches draw last.
        patches += [ (patch[1],pn,psplit[2*n+1]) for n,patch in enumerate(p.patchpts) ]
    patches.sort()
    print("Patching intersections...")
    for p in patches:
        c.stroke(p[2],ps[p[1]].sty)
    print("Done.")
def fieldPath(fmap, z0, z1, c, cmax, npts=50):
    """Trace one magnetic field line of map *fmap* from z0 to z1.

    c selects which field line (its transverse offset scales like
    1/sqrt(B)); points whose offset would exceed cmax are dropped.
    """
    line = path3d()
    for z in unifrange(z0, z1, npts):
        # Flux conservation: line spacing ~ 1/sqrt(B).  The small
        # additive constant keeps the division finite where B -> 0.
        offset = c / sqrt(fmap(z) + 0.0001)
        if abs(offset) < cmax:
            line.addpt((0, offset, z))
    return line
def larmor_unif(fT, theta, KE, t):
    """Electron position (x, y, z) [m] at time t [s] while spiraling in
    a uniform field fT [T], launched at pitch angle theta with kinetic
    energy KE [keV].
    """
    beta = electron_beta(KE)
    # Longitudinal drift [m], cyclotron radius [m] and frequency [Hz].
    z_pos = t * beta * cos(theta) * 3e8
    radius = 3.3e-6 * beta * (KE + 511) * sin(theta) / fT
    freq = 2.8e10 * fT
    phase = 2 * pi * freq * t
    return radius * cos(phase), radius * sin(phase), z_pos
def larmor_step(p, pt2_per_B, fT):
    """One adiabatic step: return (vz, nu) for total momentum p [keV/c],
    adiabatic invariant pt^2/B, and local field fT [T].

    vz is the longitudinal velocity [m/s] (0 when all momentum is
    transverse, i.e. the particle is mirrored) and nu the angular
    cyclotron frequency [rad/s].
    """
    omega = 2.8e10*fT*2*pi
    p_trans = sqrt(fT*pt2_per_B)
    if p <= p_trans:
        # Mirror point: no longitudinal momentum left.
        return 0, omega
    p_long = sqrt(p**2 - p_trans**2)
    # Relativistic velocity with the electron rest mass 511 keV.
    v_long = p_long/sqrt(p*p + 511*511)*3e8
    return v_long, omega
def larmorPath(fmap,p,pt2_per_B,z0,z1,dt,theta=0):
    # Integrate an electron's Larmor spiral through field map fmap from
    # z0 toward z1 with time step dt [s]; stops at the range end or when
    # the particle mirrors (vz <= 0).  Returns a path3d of (x, y, z).
    lpath = path3d()
    z = z0
    vz = 1
    while z0 <= z <= z1 and vz>0:
        fT = fmap(z)	# magnetic field, T
        r = 3.3e-6*sqrt(pt2_per_B/fT)	# larmor radius, m
        lpath.addpt((r*cos(theta),r*sin(theta),z))
        # step to next point
        vz,nu = larmor_step(p,pt2_per_B,fmap(z))
        theta += nu*dt
        z += vz*dt
    return lpath
def plot_larmor_trajectory():
    # Render a 500 keV electron's Larmor spiral threading the field
    # lines of a step-up field map, with proper over/under crossings,
    # and write two PDFs (field only, and spiral over field).
    fmap = fieldMap()
    fmap.addFlat(-1.0,0.01,1.0)
    fmap.addFlat(0.015,1.0,0.6)
    #fmap.addFlat(-1.0,0.01,0.6)
    #fmap.addFlat(0.08,1.0,1.0)
    fT = fmap(0)
    theta = 1.4
    KE = 511.
    # Projection: rotate the 3-D scene into the drawing plane and scale.
    #rot = rot3(0,0.0,-pi/2-0.2,500)
    rot = rot3(0,0.0,-pi/2+0.2,500)
    tm = 1e-9
    # doFinal toggles the (slow) crossing-detection pass.
    doFinal = True
    plarmor = larmorPath(fmap,500,495**2/fmap(0),0,0.02,5e-13,3*pi/4)
    plarmor.apply(rot)
    #plarmor.sty = [style.linewidth.thick,rgb.red]
    plarmor.sty = [style.linewidth.thick]
    plarmor.endsty = [deco.earrow()]
    plarmor.finish()
    x0,y0 = plarmor.p.at(plarmor.p.begin())
    fieldlines = []
    w = 0.0025
    cmagf = canvas.canvas()
    # Draw a fan of field lines and (optionally) interleave each with
    # the spiral so crossings depth-sort correctly.
    for o in unifrange(-w,w,20):
        pf = fieldPath(fmap,-0.002,0.022,o,1.02*w)
        if len(pf.pts) < 10:
            continue
        pf.apply(rot)
        pf.finish()
        pf.breakunder = 0.07
        pf.nopatch = True
        #pf.sty=[style.linewidth.thin,rgb.blue]
        pf.sty=[style.linewidth.thin]	# field line color/style
        fieldlines.append(pf)
        pf.drawto(cmagf)
        if doFinal:
            interleave(plarmor,pf)
    # Mark the spiral's starting point.
    #cmagf.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.green])])
    cmagf.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.white]),style.linewidth.Thick])
    cmagf.writetofile("/Users/michael/Desktop/Bfield.pdf")
    c = canvas.canvas()
    if doFinal:
        drawInterleaved(c,[plarmor,]+fieldlines)
    else:
        plarmor.drawto(c)
        for pf in fieldlines:
            pf.drawto(c)
    #c.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.green])])
    c.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.white]),style.linewidth.Thick])
    c.writetofile("/Users/michael/Desktop/larmor_spiral.pdf")
def plot_spectrometer_field():
    # Draw the field lines of a symmetric spectrometer field profile
    # (low-field regions at both ends, flat 1 T in the middle) and
    # write them to a PDF.
    fmap = fieldMap()
    fmap.addFlat(-3,-2.8,0.01)
    fmap.addFlat(-2.3,-2.1,0.6)
    fmap.addFlat(-1.6,1.6,1.0)
    fmap.addFlat(2.1,2.3,0.6)
    fmap.addFlat(2.8,3,0.01)
    rot = rot3(0.0,0.0,-pi/2.,10.)
    w = 0.25
    cmagf = canvas.canvas()
    for o in unifrange(-w,w,20):
        pf = fieldPath(fmap,-2.6,2.6,o,w,400)
        pf.apply(rot)
        #if len(pf.pts) < 10:
        #	continue
        pf.finish()
        #pf.sty=[style.linewidth.thin,rgb.blue]
        pf.sty=[style.linewidth.thin]	# field line color/style
        pf.drawto(cmagf)
    cmagf.writetofile("/Users/michael/Desktop/Bfield.pdf")
def larmor_clipping_plot():
    """Plot line-survival fractions versus source offset for two Larmor radii
    (1 MeV: 2*3.3 mm, 500 keV: 3.3 mm) plus their ratio; writes
    survival_<rho>.pdf to the desktop."""
    gSurv=graph.graphxy(width=20,height=10,
        x=graph.axis.lin(title="Source offset [mm]"),
        y=graph.axis.lin(title="",min=0,max=1),
        key = graph.key.key(pos="bl"))
    gSurv.texrunner.set(lfs='foils17pt')  # large slide-friendly TeX font
    rho = 1.5  # presumably the source spot radius, mm -- confirm against survival_fraction
    h0 = 9.5   # maximum offset scanned, mm
    # Columns: [offset, 1 MeV survival, 500 keV survival].
    gdat = [ [h0-h,survival_fraction(h,rho,2*3.3),survival_fraction(h,rho,2*3.3/2)] for h in unifrange(h0-10,h0,100) ]
    # Append ratio column; the 1e-3/1e-6 guards avoid dividing by ~zero survival.
    gdat = [ g+[0.5*(g[2]<=1e-3)+(g[2]>1e-3)*(g[1]/(g[2]+1e-6)),] for g in gdat]
    gSurv.plot(graph.data.points(gdat,x=1,y=3,title="500keV line survival"),[graph.style.line([style.linewidth.Thick,rgb.blue])])
    gSurv.plot(graph.data.points(gdat,x=1,y=2,title="1MeV line survival"),[graph.style.line([style.linewidth.Thick,rgb.red])])
    gSurv.plot(graph.data.points(gdat,x=1,y=4,title="1MeV:500keV survival ratio"),[graph.style.line([style.linewidth.Thick])])
    gSurv.writetofile("/Users/michael/Desktop/survival_%g.pdf"%rho)
def radial_clipping_plot():
    """Plot line-survival fractions versus source spot radius at a fixed
    offset h, for two Larmor radii plus their ratio; writes
    survival_radial.pdf to the desktop."""
    gSurv=graph.graphxy(width=20,height=10,
        x=graph.axis.lin(title="Source spot radius [mm]",min=0,max=9.5),
        y=graph.axis.lin(title="",min=0,max=1),
        key = graph.key.key(pos="bl"))
    gSurv.texrunner.set(lfs='foils17pt')  # large slide-friendly TeX font
    h = 9.5  # fixed source offset, mm
    # Columns: [radius, 1 MeV survival, 500 keV survival].
    gdat = [ [rho,radial_survival_fraction(h,rho,3.3),radial_survival_fraction(h,rho,3.3/2.0)] for rho in unifrange(0.,9.5,200) ]
    # Append ratio column; the 1e-3/1e-6 guards avoid dividing by ~zero survival.
    gdat = [ g+[0.5*(g[2]<=1e-3)+(g[2]>1e-3)*(g[1]/(g[2]+1e-6)),] for g in gdat]
    gSurv.plot(graph.data.points(gdat,x=1,y=3,title="500keV line survival"),[graph.style.line([style.linewidth.Thick,rgb.blue])])
    gSurv.plot(graph.data.points(gdat,x=1,y=2,title="1MeV line survival"),[graph.style.line([style.linewidth.Thick,rgb.red])])
    gSurv.plot(graph.data.points(gdat,x=1,y=4,title="1MeV:500keV survival ratio"),[graph.style.line([style.linewidth.Thick])])
    gSurv.writetofile("/Users/michael/Desktop/survival_radial.pdf")
if __name__ == "__main__":
#larmor_clipping_plot()
#radial_clipping_plot()
#plot_larmor_trajectory()
plot_spectrometer_field() | UCNA/main | Scripts/plotters/LarmorClipping.py | Python | gpl-3.0 | 8,636 |
#!/usr/bin/env python
# snakeoil.py
# Chris X Edwards <[email protected]>
# Snake Oil is a Python library for interfacing with a TORCS
# race car simulator which has been patched with the server
# extentions used in the Simulated Car Racing competitions.
# http://scr.geccocompetitions.com/
#
# To use it, you must import it and create a "drive()" function.
# This will take care of option handling and server connecting, etc.
# To see how to write your own client do something like this which is
# a complete working client:
# /-----------------------------------------------\
# |#!/usr/bin/python |
# |import snakeoil |
# |if __name__ == "__main__": |
# | C= snakeoil.Client() |
# | for step in xrange(C.maxSteps,0,-1): |
# | C.get_servers_input() |
# | snakeoil.drive_example(C) |
# | C.respond_to_server() |
# | C.shutdown() |
# \-----------------------------------------------/
# This should then be a full featured client. The next step is to
# replace 'snakeoil.drive_example()' with your own. There is a
# dictionary which holds various option values (see `default_options`
# variable for all the details) but you probably only need a few
# things from it. Mainly the `trackname` and `stage` are important
# when developing a strategic bot.
#
# This dictionary also contains a ServerState object
# (key=S) and a DriverAction object (key=R for response). This allows
# you to get at all the information sent by the server and to easily
# formulate your reply. These objects contain a member dictionary "d"
# (for data dictionary) which contain key value pairs based on the
# server's syntax. Therefore, you can read the following:
# angle, curLapTime, damage, distFromStart, distRaced, focus,
# fuel, gear, lastLapTime, opponents, racePos, rpm,
# speedX, speedY, speedZ, track, trackPos, wheelSpinVel, z
# The syntax specifically would be something like:
# X= o[S.d['tracPos']]
# And you can set the following:
# accel, brake, clutch, gear, steer, focus, meta
# The syntax is:
# o[R.d['steer']]= X
# Note that it is 'steer' and not 'steering' as described in the manual!
# All values should be sensible for their type, including lists being lists.
# See the SCR manual or http://xed.ch/help/torcs.html for details.
#
# If you just run the snakeoil.py base library itself it will implement a
# serviceable client with a demonstration drive function that is
# sufficient for getting around most tracks.
# Try `snakeoil.py --help` to get started.
# for Python3-based torcs python robot client
from __future__ import division
from __future__ import absolute_import
import socket
import sys
import getopt
import os
import time
# Module-wide constants and the command-line help text.
PI= 3.14159265359
data_size = 2**17  # UDP receive buffer size in bytes (128 KiB)
# Initialize help messages
ophelp= u'Options:\n'
ophelp+= u' --host, -H <host> TORCS server host. [localhost]\n'
ophelp+= u' --port, -p <port> TORCS port. [3001]\n'
ophelp+= u' --id, -i <id> ID for server. [SCR]\n'
ophelp+= u' --steps, -m <#> Maximum simulation steps. 1 sec ~ 50 steps. [100000]\n'
ophelp+= u' --episodes, -e <#> Maximum learning episodes. [1]\n'
ophelp+= u' --track, -t <track> Your name for this track. Used for learning. [unknown]\n'
ophelp+= u' --stage, -s <#> 0=warm up, 1=qualifying, 2=race, 3=unknown. [3]\n'
ophelp+= u' --debug, -d Output full telemetry.\n'
ophelp+= u' --help, -h Show this help.\n'
ophelp+= u' --version, -v Show current version.'
usage= u'Usage: %s [ophelp [optargs]] \n' % sys.argv[0]
usage= usage + ophelp
version= u"20130505-2"
def clip(v, lo, hi):
    u"""Clamp *v* to the closed interval [lo, hi] (lower bound checked first)."""
    if v < lo:
        return lo
    return hi if v > hi else v
def bargraph(x,mn,mx,w,c=u'X'):
u'''Draws a simple asciiart bar graph. Very handy for
visualizing what's going on with the data.
x= Value from sensor, mn= minimum plottable value,
mx= maximum plottable value, w= width of plot in chars,
c= the character to plot with.'''
if not w: return u'' # No width!
if x<mn: x= mn # Clip to bounds.
if x>mx: x= mx # Clip to bounds.
tx= mx-mn # Total real units possible to show on graph.
if tx<=0: return u'backwards' # Stupid bounds.
upw= tx/float(w) # X Units per output char width.
if upw<=0: return u'what?' # Don't let this happen.
negpu, pospu, negnonpu, posnonpu= 0,0,0,0
if mn < 0: # Then there is a negative part to graph.
if x < 0: # And the plot is on the negative side.
negpu= -x + min(0,mx)
negnonpu= -mn + x
else: # Plot is on pos. Neg side is empty.
negnonpu= -mn + min(0,mx) # But still show some empty neg.
if mx > 0: # There is a positive part to the graph
if x > 0: # And the plot is on the positive side.
pospu= x - max(0,mn)
posnonpu= mx - x
else: # Plot is on neg. Pos side is empty.
posnonpu= mx - max(0,mn) # But still show some empty pos.
nnc= int(negnonpu/upw)*u'-'
npc= int(negpu/upw)*c
ppc= int(pospu/upw)*c
pnc= int(posnonpu/upw)*u'_'
return u'[%s]' % (nnc+npc+ppc+pnc)
class Client(object):
    """UDP client for the TORCS SCR (Simulated Car Racing) server.

    Handles option parsing, connection setup/teardown and the per-step
    exchange of sensor data (stored in a ServerState, self.S) and effector
    commands (a DriverAction, self.R).  Keyword arguments override the
    command line, which overrides the built-in defaults.

    Fixed for Python 3 (the file's stated target): ``except E as e`` syntax,
    ``print()`` calls and ``u''`` literals, all of which also remain valid
    Python 2.6+.
    """
    def __init__(self,H=None,p=None,i=None,e=None,t=None,s=None,d=None,vision=False):
        # If you don't like the option defaults, change them here.
        self.vision = vision
        self.host= u'localhost'
        self.port= 3001
        self.sid= u'SCR'
        self.maxEpisodes=1 # "Maximum number of learning episodes to perform"
        self.trackname= u'unknown'
        self.stage= 3 # 0=Warm-up, 1=Qualifying 2=Race, 3=unknown <Default=3>
        self.debug= False
        self.maxSteps= 100000 # 50steps/second
        self.parse_the_command_line()
        # Explicit keyword arguments win over the command line.
        if H: self.host= H
        if p: self.port= p
        if i: self.sid= i
        if e: self.maxEpisodes= e
        if t: self.trackname= t
        if s: self.stage= s
        if d: self.debug= d
        self.S= ServerState()
        self.R= DriverAction()
        self.setup_connection()
    def setup_connection(self):
        """Create the UDP socket and handshake with the TORCS server.

        Retries until the server answers '***identified***'; after several
        timeouts it kills and relaunches torcs via os.system.
        """
        # == Set Up UDP Socket ==
        try:
            self.so= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except socket.error:
            print(u'Error: Could not create socket...')
            sys.exit(-1)
        # == Initialize Connection To Server ==
        self.so.settimeout(1)
        n_fail = 5
        while True:
            # This string establishes track sensor angles! You can customize them.
            #a= "-90 -75 -60 -45 -30 -20 -15 -10 -5 0 5 10 15 20 30 45 60 75 90"
            # xed- Going to try something a bit more aggressive...
            a= u"-45 -19 -12 -7 -4 -2.5 -1.7 -1 -.5 0 .5 1 1.7 2.5 4 7 12 19 45"
            initmsg=u'%s(init %s)' % (self.sid,a)
            try:
                self.so.sendto(initmsg.encode(), (self.host, self.port))
            except socket.error:
                sys.exit(-1)
            sockdata= u''
            try:
                sockdata,addr= self.so.recvfrom(data_size)
                sockdata = sockdata.decode(u'utf-8')
            except socket.error:
                print(u"Waiting for server on %d............" % self.port)
                print(u"Count Down : %d" % n_fail)
                if n_fail < 0:
                    # Too many timeouts: restart torcs and the autostart script.
                    print(u"relaunch torcs")
                    os.system(u'pkill torcs')
                    time.sleep(1.0)
                    if self.vision is False:
                        os.system(u'torcs -nofuel -nodamage -nolaptime &')
                    else:
                        os.system(u'torcs -nofuel -nodamage -nolaptime -vision &')
                    time.sleep(1.0)
                    os.system(u'sh autostart.sh')
                    n_fail = 5
                n_fail -= 1
            identify = u'***identified***'
            if identify in sockdata:
                print(u"Client connected on %d.............." % self.port)
                break
    def parse_the_command_line(self):
        """Apply command-line options over the defaults; may sys.exit()
        for --help/--version or on bad input."""
        try:
            (opts, args) = getopt.getopt(sys.argv[1:], u'H:p:i:m:e:t:s:dhv',
                       [u'host=',u'port=',u'id=',u'steps=',
                        u'episodes=',u'track=',u'stage=',
                        u'debug',u'help',u'version'])
        except getopt.error as why:
            print(u'getopt error: %s\n%s' % (why, usage))
            sys.exit(-1)
        try:
            for opt in opts:
                if opt[0] == u'-h' or opt[0] == u'--help':
                    print(usage)
                    sys.exit(0)
                if opt[0] == u'-d' or opt[0] == u'--debug':
                    self.debug= True
                if opt[0] == u'-H' or opt[0] == u'--host':
                    self.host= opt[1]
                if opt[0] == u'-i' or opt[0] == u'--id':
                    self.sid= opt[1]
                if opt[0] == u'-t' or opt[0] == u'--track':
                    self.trackname= opt[1]
                if opt[0] == u'-s' or opt[0] == u'--stage':
                    self.stage= int(opt[1])
                if opt[0] == u'-p' or opt[0] == u'--port':
                    self.port= int(opt[1])
                if opt[0] == u'-e' or opt[0] == u'--episodes':
                    self.maxEpisodes= int(opt[1])
                if opt[0] == u'-m' or opt[0] == u'--steps':
                    self.maxSteps= int(opt[1])
                if opt[0] == u'-v' or opt[0] == u'--version':
                    print(u'%s %s' % (sys.argv[0], version))
                    sys.exit(0)
        except ValueError as why:
            print(u'Bad parameter \'%s\' for option %s: %s\n%s' % (
                opt[1], opt[0], why, usage))
            sys.exit(-1)
        if len(args) > 0:
            print(u'Superflous input? %s\n%s' % (u', '.join(args), usage))
            sys.exit(-1)
    def get_servers_input(self):
        u'''Server's input is stored in a ServerState object'''
        if not self.so: return
        sockdata= u''
        while True:
            try:
                # Receive server data
                sockdata,addr= self.so.recvfrom(data_size)
                sockdata = sockdata.decode(u'utf-8')
            except socket.error:
                # Progress dot while waiting (was the py2-only "print u'.',").
                sys.stdout.write(u'.')
                #print "Waiting for data on %d.............." % self.port
            if u'***identified***' in sockdata:
                print(u"Client connected on %d.............." % self.port)
                continue
            elif u'***shutdown***' in sockdata:
                print((u"Server has stopped the race on %d. "+
                       u"You were in %d place.") %
                      (self.port,self.S.d[u'racePos']))
                self.shutdown()
                return
            elif u'***restart***' in sockdata:
                # What do I do here?
                print(u"Server has restarted the race on %d." % self.port)
                # I haven't actually caught the server doing this.
                self.shutdown()
                return
            elif not sockdata: # Empty?
                continue # Try again.
            else:
                self.S.parse_server_str(sockdata)
                if self.debug:
                    sys.stderr.write(u"\x1b[2J\x1b[H") # Clear for steady output.
                    print(self.S)
                break # Can now return from this function.
    def respond_to_server(self):
        """Serialize self.R (repr gives the server wire format) and send it;
        exits the process on socket errors."""
        if not self.so: return
        try:
            message = repr(self.R)
            self.so.sendto(message.encode(), (self.host, self.port))
        except socket.error as emsg:
            # Python 3 fix: OSError is not indexable (emsg[0]/emsg[1] raised
            # TypeError); print the exception itself instead.
            print(u"Error sending to server: %s" % emsg)
            sys.exit(-1)
        if self.debug: print(self.R.fancyout())
        # Or use this for plain output:
        #if self.debug: print(self.R)
    def shutdown(self):
        """Close the socket and mark the client unusable (idempotent)."""
        if not self.so: return
        print (u"Race terminated or %d steps elapsed. Shutting down %d."
               % (self.maxSteps,self.port))
        self.so.close()
        self.so = None
        #sys.exit() # No need for this really.
class ServerState(object):
    u'''What the server is reporting right now.

    parse_server_str() fills the data dictionary ``d`` with one entry per
    sensor; values are converted by destringify() to a float, a list of
    floats, or the raw string.  (Ported to Python 3, the file's stated
    target: str()/chr() replace the py2-only unicode()/unichr().)
    '''
    def __init__(self):
        self.servstr= u''   # last raw server string (trailing ')' stripped)
        self.d= dict()      # sensor name -> parsed value(s)
    def parse_server_str(self, server_string):
        u'''Parse the server string, "(name v1 v2 ...)(name v)...", into d.'''
        self.servstr= server_string.strip()[:-1]
        sslisted= self.servstr.strip().lstrip(u'(').rstrip(u')').split(u')(')
        for i in sslisted:
            w= i.split(u' ')
            self.d[w[0]]= destringify(w[1:])
    def __repr__(self):
        # Comment the next line for raw output:
        return self.fancyout()
        # ------------------------------------- (raw-output alternative below)
        out= u''
        for k in sorted(self.d):
            strout= str(self.d[k])
            if type(self.d[k]) is list:
                strlist= [str(i) for i in self.d[k]]
                strout= u', '.join(strlist)
            out+= u"%s: %s\n" % (k,strout)
        return out
    def fancyout(self):
        u'''Specialty output for useful ServerState monitoring.'''
        out= u''
        sensors= [ # Select the ones you want in the order you want them.
        #'curLapTime',
        #'lastLapTime',
        u'stucktimer',
        #'damage',
        #'focus',
        u'fuel',
        #'gear',
        u'distRaced',
        u'distFromStart',
        #'racePos',
        u'opponents',
        u'wheelSpinVel',
        u'z',
        u'speedZ',
        u'speedY',
        u'speedX',
        u'targetSpeed',
        u'rpm',
        u'skid',
        u'slip',
        u'track',
        u'trackPos',
        u'angle',
        ]
        #for k in sorted(self.d): # Use this to get all sensors.
        for k in sensors:
            if type(self.d.get(k)) is list: # Handle list type data.
                if k == u'track': # Nice display for track sensors.
                    strout= u''
                    # for tsensor in self.d['track']:
                    #    if tsensor >180: oc= '|'
                    #    elif tsensor > 80: oc= ';'
                    #    elif tsensor > 60: oc= ','
                    #    elif tsensor > 39: oc= '.'
                    #    #elif tsensor > 13: oc= chr(int(tsensor)+65-13)
                    #    elif tsensor > 13: oc= chr(int(tsensor)+97-13)
                    #    elif tsensor > 3: oc= chr(int(tsensor)+48-3)
                    #    else: oc= '_'
                    #    strout+= oc
                    # strout= ' -> '+strout[:9] +' ' + strout[9] + ' ' + strout[10:]+' <-'
                    raw_tsens= [u'%.1f'%x for x in self.d[u'track']]
                    strout+= u' '.join(raw_tsens[:9])+u'_'+raw_tsens[9]+u'_'+u' '.join(raw_tsens[10:])
                elif k == u'opponents': # Nice display for opponent sensors.
                    strout= u''
                    for osensor in self.d[u'opponents']:
                        # Map opponent distance to a single character code.
                        if osensor >190: oc= u'_'
                        elif osensor > 90: oc= u'.'
                        elif osensor > 39: oc= chr(int(osensor/2)+97-19)
                        elif osensor > 13: oc= chr(int(osensor)+65-13)
                        elif osensor > 3: oc= chr(int(osensor)+48-3)
                        else: oc= u'?'
                        strout+= oc
                    strout= u' -> '+strout[:18] + u' ' + strout[18:]+u' <-'
                else:
                    strlist= [str(i) for i in self.d[k]]
                    strout= u', '.join(strlist)
            else: # Not a list type of value.
                if k == u'gear': # This is redundant now since it's part of RPM.
                    gs= u'_._._._._._._._._'
                    p= int(self.d[u'gear']) * 2 + 2 # Position
                    l= u'%d'%self.d[u'gear'] # Label
                    if l==u'-1': l= u'R'
                    if l==u'0': l= u'N'
                    strout= gs[:p]+ u'(%s)'%l + gs[p+3:]
                elif k == u'damage':
                    strout= u'%6.0f %s' % (self.d[k], bargraph(self.d[k],0,10000,50,u'~'))
                elif k == u'fuel':
                    strout= u'%6.0f %s' % (self.d[k], bargraph(self.d[k],0,100,50,u'f'))
                elif k == u'speedX':
                    cx= u'X'
                    if self.d[k]<0: cx= u'R' # reversing
                    strout= u'%6.1f %s' % (self.d[k], bargraph(self.d[k],-30,300,50,cx))
                elif k == u'speedY': # This gets reversed for display to make sense.
                    strout= u'%6.1f %s' % (self.d[k], bargraph(self.d[k]*-1,-25,25,50,u'Y'))
                elif k == u'speedZ':
                    strout= u'%6.1f %s' % (self.d[k], bargraph(self.d[k],-13,13,50,u'Z'))
                elif k == u'z':
                    strout= u'%6.3f %s' % (self.d[k], bargraph(self.d[k],.3,.5,50,u'z'))
                elif k == u'trackPos': # This gets reversed for display to make sense.
                    cx=u'<'
                    if self.d[k]<0: cx= u'>'
                    strout= u'%6.3f %s' % (self.d[k], bargraph(self.d[k]*-1,-1,1,50,cx))
                elif k == u'stucktimer':
                    if self.d[k]:
                        strout= u'%3d %s' % (self.d[k], bargraph(self.d[k],0,300,50,u"'"))
                    else: strout= u'Not stuck!'
                elif k == u'rpm':
                    # Bar is drawn with the current gear as the plot character.
                    g= self.d[u'gear']
                    if g < 0:
                        g= u'R'
                    else:
                        g= u'%1d'% g
                    strout= bargraph(self.d[k],0,10000,50,g)
                elif k == u'angle':
                    # Tiny compass-needle art indexed by the heading angle.
                    asyms= [
                        u"  !  ", u".|'  ", u"./'  ", u"_.-  ", u".--  ", u"..-  ",
                        u"---  ", u".__  ", u"-._  ", u"'-.  ", u"'\.  ", u"'|.  ",
                        u"  |  ", u"  .|'", u"  ./'", u"  .-'", u"  _.-", u"  __.",
                        u"  ---", u"  --.", u"  -._", u"  -..", u"  '\.", u"  '|." ]
                    rad= self.d[k]
                    deg= int(rad*180/PI)
                    symno= int(.5+ (rad+PI) / (PI/12) )
                    symno= symno % (len(asyms)-1)
                    strout= u'%5.2f %3d (%s)' % (rad,deg,asyms[symno])
                elif k == u'skid': # A sensible interpretation of wheel spin.
                    frontwheelradpersec= self.d[u'wheelSpinVel'][0]
                    skid= 0
                    if frontwheelradpersec:
                        skid= .5555555555*self.d[u'speedX']/frontwheelradpersec - .66124
                    strout= bargraph(skid,-.05,.4,50,u'*')
                elif k == u'slip': # A sensible interpretation of wheel spin.
                    frontwheelradpersec= self.d[u'wheelSpinVel'][0]
                    slip= 0
                    if frontwheelradpersec:
                        # Rear minus front wheel spin: positive means wheelspin.
                        slip= ((self.d[u'wheelSpinVel'][2]+self.d[u'wheelSpinVel'][3]) -
                               (self.d[u'wheelSpinVel'][0]+self.d[u'wheelSpinVel'][1]))
                    strout= bargraph(slip,-5,150,50,u'@')
                else:
                    strout= str(self.d[k])
            out+= u"%s: %s\n" % (k,strout)
        return out
class DriverAction(object):
    u'''What the driver is intending to do (i.e. send to the server).
    Composes something like this for the server:
    (accel 1)(brake 0)(gear 1)(steer 0)(clutch 0)(focus 0)(meta 0) or
    (accel 1)(brake 0)(gear 1)(steer 0)(clutch 0)(focus -90 -45 0 45 90)(meta 0)

    (Ported to Python 3: str() replaces the py2-only unicode(); the
    unreachable second return in __repr__ was removed.)'''
    def __init__(self):
        self.actionstr= u''
        # "d" is for data dictionary: effector name -> value to send.
        self.d= { u'accel':0.2,
                  u'brake':0,
                  u'clutch':0,
                  u'gear':1,
                  u'steer':0,
                  u'focus':[-90,-45,0,45,90],
                  u'meta':0
                  }
    def clip_to_limits(self):
        u"""There pretty much is never a reason to send the server
        something like (steer 9483.323). This comes up all the time
        and it's probably just more sensible to always clip it than to
        worry about when to. The "clip" command is still a snakeoil
        utility function, but it should be used only for non standard
        things or non obvious limits (limit the steering to the left,
        for example). For normal limits, simply don't worry about it."""
        self.d[u'steer']= clip(self.d[u'steer'], -1, 1)
        self.d[u'brake']= clip(self.d[u'brake'], 0, 1)
        self.d[u'accel']= clip(self.d[u'accel'], 0, 1)
        self.d[u'clutch']= clip(self.d[u'clutch'], 0, 1)
        if self.d[u'gear'] not in [-1, 0, 1, 2, 3, 4, 5, 6]:
            self.d[u'gear']= 0
        if self.d[u'meta'] not in [0,1]:
            self.d[u'meta']= 0
        if type(self.d[u'focus']) is not list or min(self.d[u'focus'])<-180 or max(self.d[u'focus'])>180:
            self.d[u'focus']= 0
    def __repr__(self):
        u'''Serialize the (clipped) action dict into the server's
        "(key value...)" wire syntax.'''
        self.clip_to_limits()
        out= u''
        for k in self.d:
            out+= u'('+k+u' '
            v= self.d[k]
            if not type(v) is list:
                out+= u'%.3f' % v
            else:
                out+= u' '.join([str(x) for x in v])
            out+= u')'
        return out
        # (dead code removed: a second, unreachable "return out+'\n'" followed here)
    def fancyout(self):
        u'''Specialty output for useful monitoring of bot's effectors.'''
        out= u''
        od= self.d.copy()
        od.pop(u'gear',u'') # Not interesting.
        od.pop(u'meta',u'') # Not interesting.
        od.pop(u'focus',u'') # Not interesting. Yet.
        for k in sorted(od):
            if k == u'clutch' or k == u'brake' or k == u'accel':
                strout= u'%6.3f %s' % (od[k], bargraph(od[k],0,1,50,k[0].upper()))
            elif k == u'steer': # Reverse the graph to make sense.
                strout= u'%6.3f %s' % (od[k], bargraph(od[k]*-1,-1,1,50,u'S'))
            else:
                strout= str(od[k])
            out+= u"%s: %s\n" % (k,strout)
        return out
# == Misc Utility Functions
def destringify(s):
    u'''makes a string into a value or a list of strings into a list of
    values (if possible)

    Returns the input unchanged when it is empty or when no float parses
    (a warning is printed in that case).  Python 3 fix: the old check
    ``type(s) is unicode`` raised NameError under Python 3; use
    isinstance(s, str) instead.'''
    if not s:
        return s
    if isinstance(s, str):
        try:
            return float(s)
        except ValueError:
            print(u"Could not find a value in %s" % s)
            return s
    elif isinstance(s, list):
        if len(s) < 2:
            # Single-element lists collapse to the converted element.
            return destringify(s[0])
        else:
            return [destringify(i) for i in s]
def drive_example(c):
    u'''Demonstration autopilot: proportional steering toward the track
    axis, crude throttle control with traction control, and a fixed-ratio
    automatic gearbox.  The correct thing to do is to replace this with
    your own drive() function.'''
    pi = 3.14159265359          # same value as the module-level PI
    sensors = c.S.d             # latest telemetry from the server
    actions = c.R.d             # effector dictionary sent back to the server
    # -- Steering: turn toward the corner, then correct back to center.
    heading_term = sensors[u'angle'] * 10 / pi
    actions[u'steer'] = heading_term
    actions[u'steer'] -= sensors[u'trackPos'] * .10
    # -- Throttle: chase a target speed, easing off while steering hard.
    target_speed = 1000
    if sensors[u'speedX'] < target_speed - (actions[u'steer'] * 50):
        actions[u'accel'] += .01
    else:
        actions[u'accel'] -= .01
    if sensors[u'speedX'] < 10:
        # Extra kick to get rolling from (near) standstill.
        actions[u'accel'] += 1 / (sensors[u'speedX'] + .1)
    # -- Traction Control System: back off when the rear wheels outspin the front.
    rear_spin = sensors[u'wheelSpinVel'][2] + sensors[u'wheelSpinVel'][3]
    front_spin = sensors[u'wheelSpinVel'][0] + sensors[u'wheelSpinVel'][1]
    if rear_spin - front_spin > 5:
        actions[u'accel'] -= .2
    # -- Automatic transmission: highest gear whose speed threshold is exceeded.
    chosen_gear = 1
    for threshold, gear in ((50, 2), (80, 3), (110, 4), (140, 5), (170, 6)):
        if sensors[u'speedX'] > threshold:
            chosen_gear = gear
    actions[u'gear'] = chosen_gear
    return
# ================ MAIN ================
"""
if __name__ == "__main__":
    C= Client(p=3101)
    for step in range(C.maxSteps,0,-1):
        C.get_servers_input()
        drive_example(C)
        C.respond_to_server()
    C.shutdown()
"""
if __name__ == u"__main__":
    # Drive four clients in lockstep, one server port each.
    # Bug fixes: this *is* the snakeoil module, so Client/drive_example are
    # referenced directly (the old 'snakeoil.' prefix raised NameError);
    # range() replaces the Python-2-only xrange(); and shutdown now happens
    # once after all steps -- the old for/else hung off the *inner* loop, so
    # every client was shut down right after the first simulation step.
    C = [Client(p=P) for P in [3001, 3002, 3003, 3004]]
    for step in range(C[0].maxSteps, 0, -1):
        for Cs in C:
            Cs.get_servers_input()
            drive_example(Cs)
            Cs.respond_to_server()
    for Cs in C:
        Cs.shutdown()
| sneharavi12/DeepLearningFinals | SelfDrivingRL/snakeoil3_gym.py | Python | mit | 24,145 |
from keras import backend as K
from keras import initializations
from keras.backend.common import _EPSILON
from keras.engine.topology import Layer
from keras.engine import InputSpec
from theano.tensor.nnet import h_softmax
import theano.tensor as T
class HierarchicalSoftmax(Layer):
    """Two-level hierarchical softmax output layer (Keras 1.x / Theano).

    Splits a softmax over `output_dim` classes into ~sqrt(output_dim)
    classes of ~sqrt(output_dim) outputs each via theano's h_softmax.
    Expects two inputs: the feature tensor and the target indices.  While
    trainable, only the target's probability is computed (last dim 1);
    otherwise the full distribution is returned (last dim output_dim).
    """
    def __init__(self, output_dim, init='glorot_uniform', **kwargs):
        self.init = initializations.get(init)
        self.output_dim = output_dim

        def hshape(n):
            # Factor n into an l1 x l2 grid with l1*l2 >= n, l1 ~ l2 ~ sqrt(n).
            from math import sqrt, ceil
            l1 = ceil(sqrt(n))
            l2 = ceil(n / l1)
            return int(l1), int(l2)
        self.n_classes, self.n_outputs_per_class = hshape(output_dim)

        super(HierarchicalSoftmax, self).__init__(**kwargs)

    def build(self, input_shape):
        # Two weight matrices: W1/b1 pick the class group, W2/b2 the
        # output within the chosen group.
        self.input_spec = [InputSpec(shape=shape) for shape in input_shape]
        input_dim = self.input_spec[0].shape[-1]

        self.W1 = self.init((input_dim, self.n_classes), name='{}_W1'.format(self.name))
        self.b1 = K.zeros((self.n_classes,), name='{}_b1'.format(self.name))

        self.W2 = self.init((self.n_classes, input_dim, self.n_outputs_per_class), name='{}_W2'.format(self.name))
        self.b2 = K.zeros((self.n_classes, self.n_outputs_per_class), name='{}_b2'.format(self.name))

        self.trainable_weights = [self.W1, self.b1, self.W2, self.b2]

    def get_output_shape_for(self, input_shape):
        # Last dimension is 1 while training, output_dim otherwise, so it
        # is reported as None (unknown) here.
        return (input_shape[0][0], input_shape[0][1], None)

    def call(self, X, mask=None):
        input_shape = self.input_spec[0].shape
        # Flatten (batch, time, features) -> (batch*time, features) for h_softmax.
        x = K.reshape(X[0], (-1, input_shape[2]))
        target = X[1].flatten() if self.trainable else None

        Y = h_softmax(x, K.shape(x)[0], self.output_dim,
                      self.n_classes, self.n_outputs_per_class,
                      self.W1, self.b1, self.W2, self.b2, target)

        output_dim = 1 if self.trainable else self.output_dim
        input_length = K.shape(X[0])[1]
        # Restore the (batch, time, output_dim) layout.
        y = K.reshape(Y, (-1, input_length, output_dim))
        return y

    def get_config(self):
        config = {'output_dim': self.output_dim,
                  'init': self.init.__name__}
        base_config = super(HierarchicalSoftmax, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def hs_categorical_crossentropy(y_true, y_pred):
    """Categorical cross-entropy with predictions clipped away from 0 and 1
    (by _EPSILON) to keep the log finite; companion loss for
    HierarchicalSoftmax outputs."""
    y_pred = T.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.categorical_crossentropy(y_pred, y_true)
| jstarc/nli_generation | hierarchical_softmax.py | Python | mit | 2,566 |
# coding: utf-8
# This file is part of https://github.com/marcus67/rechtschreibung
import os
import io
import re
import pickle
import six
import log
import util
import spelling_mode
import predefined_modes
if six.PY3:
from importlib import reload
reload(log)
reload(util)
reload(spelling_mode)
reload(predefined_modes)
MODE_FILE_DIRECTORY = u'configurations'
MODE_FILE_EXTENSION = u'.mode.pickle'
MODE_FILE_EXTENSION_PATTERN = u'(.+)' + MODE_FILE_EXTENSION.replace(u'.', u'\.')
global logger
logger = log.open_logging(__name__)
def get_mode_filename(p_document_directory, modeName):
    """Return the full path of the pickled mode file for *modeName* inside
    the document directory's configurations folder."""
    basename = modeName + MODE_FILE_EXTENSION
    return os.path.join(p_document_directory, MODE_FILE_DIRECTORY, basename)
def read_mode(p_document_directory, modeName):
    """Load a pickled spelling mode by name and return it.

    Combination attributes that were introduced after the file was written
    are filled in with defaults; when anything was missing the repaired
    mode is written back to disk.
    """
    global logger
    filename = get_mode_filename(p_document_directory, modeName)
    logger.info("read mode file '%s'" % filename)
    # 'with' guarantees the file handle is closed even if unpickling fails.
    with io.open(filename, "rb") as file:
        mode = pickle.load(file)
    changes = util.add_missing_attributes(mode.combination, spelling_mode.spelling_mode().combination)
    mode.control.is_modified = False
    if changes > 0:
        logger.info("mode file '%s' is lacking %d attributes; filling up" % (filename, changes))
        # Bug fix: write_mode() takes the document directory as its first
        # argument; it used to be called with the mode only (TypeError).
        write_mode(p_document_directory, mode)
    return mode
def write_mode(p_document_directory, mode):
    """Pickle *mode* to its canonical file, creating the configurations
    directory if needed, and clear the mode's modified flag."""
    global logger
    filename = get_mode_filename(p_document_directory, mode.control.name)
    directory = os.path.dirname(filename)
    if not os.path.exists(directory):
        os.makedirs(directory)
    logger.info("write mode file '%s'" % filename)
    # 'with' guarantees the handle is closed even if pickling fails
    # (the old explicit close() leaked the handle on error).
    with io.open(filename, "wb") as file:
        # protocol=1: keep the historical on-disk format readable.
        pickle.dump(mode, file, protocol=1)
    mode.control.is_modified = False
def get_available_modes(p_document_directory, includePredefinedModes = True):
    """Return all spelling modes: optionally the predefined ones, plus every
    pickled mode found in the document directory's configurations folder."""
    modes = []
    if includePredefinedModes:
        modes.extend(predefined_modes.get_predefined_modes())
    name_pattern = re.compile(MODE_FILE_EXTENSION_PATTERN)
    config_dir = os.path.join(p_document_directory, MODE_FILE_DIRECTORY)
    for entry in os.listdir(config_dir):
        match = name_pattern.match(entry)
        if match:
            # Group 1 of the pattern is the bare mode name.
            modes.append(read_mode(p_document_directory, match.group(1)))
    return modes
def test():
    # Round-trip check: clone every immutable (predefined) mode under a new
    # name and pickle it into the current directory's configurations folder.
    modes = get_available_modes(".")
    for mode in modes:
        if mode.control.isImmutable:
            mode.control.name = mode.control.name + u' (pickled)'
            mode.control.isImmutable = False
            write_mode(".", mode)
if __name__ == '__main__':
    test()
| marcus67/rechtschreibung | mode_manager.py | Python | gpl-2.0 | 2,386 |
# coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import unittest
import test_lib
import sys, os.path
from sickbeard.postProcessor import PostProcessor
import sickbeard
from sickbeard.tv import TVEpisode, TVShow
import sickbeard.common as c
from snatch_tests import tests
class PPInitTests(unittest.TestCase):
    """Checks that PostProcessor.__init__ derives the file name and the
    containing folder name from the path it is given."""
    def setUp(self):
        self.pp = PostProcessor(test_lib.FILEPATH)
    def test_init_file_name(self):
        self.assertEqual(self.pp.file_name, test_lib.FILENAME)
    def test_init_folder_name(self):
        self.assertEqual(self.pp.folder_name, test_lib.SHOWNAME)
class PPPrivateTests(test_lib.SickbeardTestDBCase):
    """Exercises PostProcessor's private helpers against a show/episode
    fixture inserted directly into the test database.

    (Python 2 file: the 0000/0001/0002 literals are py2 octal syntax.)
    """
    def setUp(self):
        super(PPPrivateTests, self).setUp()
        sickbeard.showList = [TVShow(0000), TVShow(0001)]
        self.pp = PostProcessor(test_lib.FILEPATH)
        self.show_obj = TVShow(0002)
        self.db = test_lib.db.DBConnection()
        # One fully-populated episode row matching the fixture file on disk.
        newValueDict = {"tvdbid": 1002,
                        "name": test_lib.SHOWNAME,
                        "description": "description",
                        "airdate": 1234,
                        "hasnfo": 1,
                        "hastbn": 1,
                        "status": 404,
                        "location": test_lib.FILEPATH}
        controlValueDict = {"showid": 0002,
                            "season": test_lib.SEASON,
                            "episode": test_lib.EPISODE}
        # use a custom update/insert method to get the data into the DB
        self.db.upsert("tv_episodes", newValueDict, controlValueDict)
        self.ep_obj = TVEpisode(self.show_obj, test_lib.SEASON, test_lib.EPISODE, test_lib.FILEPATH)
    def test__find_ep_destination_folder(self):
        # With season folders enabled, the destination must be
        # <show location>/Season 0<season>.
        self.show_obj.location = test_lib.FILEDIR
        self.ep_obj.show.seasonfolders = 1
        sickbeard.SEASON_FOLDERS_FORMAT = 'Season %02d'
        calculatedPath = self.pp._find_ep_destination_folder(self.ep_obj)
        # NOTE(review): 'ecpectedPath' is a typo for 'expectedPath' (local name only).
        ecpectedPath = os.path.join(test_lib.FILEDIR, "Season 0" + str(test_lib.SEASON))
        self.assertEqual(calculatedPath, ecpectedPath)
class PPBasicTests(test_lib.SickbeardTestDBCase):
    """End-to-end PostProcessor runs; the actual test methods are generated
    below from the snatch_tests data and attached via setattr."""
    # Per-test fixture paths, filled in by the generated test methods.
    filePath = ''
    showDir = ''
    def tearDown(self):
        # Remove whatever fixtures the generated test created.
        if self.showDir:
            test_lib.tearDown_test_show_dir(self.showDir)
        if self.filePath:
            test_lib.tearDown_test_episode_file(os.path.dirname(self.filePath))
        test_lib.SickbeardTestDBCase.tearDown(self)
def test_generator(tvdbdid, show_name, curData):
    """Build one end-to-end PostProcessor test from a snatch_tests entry.

    curData keys used: "b" release name, "q" quality, "anime" flag,
    "s" season, "e" episode list, optional "ab" absolute number.
    Returns an unbound test method asserting that pp.process() succeeds.
    """
    def test(self):
        # Create the show directory and a fake episode file named
        # after the release.
        self.showDir = test_lib.setUp_test_show_dir(show_name)
        self.filePath = test_lib.setUp_test_episode_file(None, curData["b"]+".mkv")
        show = TVShow(tvdbdid)
        show.name = show_name
        show.quality = curData["q"]
        show.location = self.showDir
        if curData["anime"]:
            show.anime = 1
        show.saveToDB()
        sickbeard.showList.append(show)
        # Register each expected episode as WANTED so the processor matches it.
        for epNumber in curData["e"]:
            episode = TVEpisode(show, curData["s"], epNumber)
            episode.status = c.WANTED
            if "ab" in curData:
                episode.absolute_number = curData["ab"]
            episode.saveToDB()
        pp = PostProcessor(self.filePath)
        self.assertTrue(pp.process())
    return test
# create the test methods
# Attach one generated test per applicable snatch_tests entry ("a" flag set)
# to PPBasicTests, each with a unique tvdbid.
tvdbdid = 1
for name, curData in tests.items(): # we use the tests from snatch_tests.py
    if not curData["a"]:
        continue
    fname = name.replace(' ', '_')
    test_name = 'test_pp_%s_%s' % (fname, tvdbdid)
    test = test_generator(tvdbdid, name, curData)
    setattr(PPBasicTests, test_name, test)
    tvdbdid += 1
if __name__ == '__main__':
    # Run the three suites in order (Python 2 print statements).
    print "=================="
    print "STARTING - PostProcessor TESTS"
    print "=================="
    print "######################################################################"
    suite = unittest.TestLoader().loadTestsFromTestCase(PPInitTests)
    unittest.TextTestRunner(verbosity=2).run(suite)
    print "######################################################################"
    suite = unittest.TestLoader().loadTestsFromTestCase(PPPrivateTests)
    unittest.TextTestRunner(verbosity=2).run(suite)
    print "######################################################################"
    suite = unittest.TestLoader().loadTestsFromTestCase(PPBasicTests)
    unittest.TextTestRunner(verbosity=2).run(suite)
| rleadbetter/Sick-Beard | tests/pp_tests.py | Python | gpl-3.0 | 5,084 |
# -*- coding: utf-8 -*-
import os
import shutil
import exodus
from sets import Set
import numpy as np
DEFAULT_BLOCK = 1
TIMESTEP_ZERO = 1
def getElemsAndNodes(input_file):
    """Return (num_elems, nodes_per_element) for block 1 of *input_file*.

    Bug fix: the exodus handle is now always closed before returning
    (it used to leak).
    """
    BLOCK_ID = 1
    exo_from = exodus.exodus(input_file, "r", array_type='ctype')
    try:
        _, _, nNodeElm, _ = exo_from.elem_blk_info(BLOCK_ID)
        num_elems = exo_from.num_elems()
    finally:
        exo_from.close()
    return (num_elems, nNodeElm)
def getExodusCopy(exodus_file_existing, exodus_file_new, parameter_names):
    '''
    Gets a copy of an existing exodus file, for the purpose of adding or changing existing parameter values (VP,VS,etc).
    Element variables cannot be added after the fact, so any of *parameter_names* not already present are declared now.

    :param exodus_file_existing: Existing file.
    :param exodus_file_new: Exodus cannot write to an existing file, so write to a new one.
    :param parameter_names: The variable names being added to the file.
    :return: The newly created exodus file object.
    '''
    exo_from = exodus.exodus(exodus_file_existing, "r", array_type='ctype')
    # Bug fix: this call was missing its closing parenthesis (a syntax
    # error in the original file).
    num_vars_from = exo_from.get_element_variable_number()
    if num_vars_from == 0:
        # No element variables yet: plain copy, then declare every requested name.
        addElementVariables = parameter_names
        exo = exodus.copyTransfer(exodus_file_existing, exodus_file_new)
        print("Adding {} to no existing variables".format(addElementVariables))
        exo.set_element_variable_number(len(addElementVariables))
        for (i, name) in enumerate(addElementVariables):
            # Exodus variable indices are 1-based.
            exo.put_element_variable_name(name, i + 1)
        return exo
    else:
        existing_names = exo_from.get_element_variable_names()
        # Only names not present yet need declaring (builtin set replaces
        # the removed Python-2-only sets.Set).
        addElementVariables = list(set(parameter_names) - set(existing_names))
        print("Adding {} to existing variables {}".format(addElementVariables, existing_names))
        return exodus.copyTransfer(exodus_file_existing, exodus_file_new, additionalElementVariables=addElementVariables)
def setMaterialParameter(exoObj, parameter_names, parameter_values):
    # type: (exoObj: Any, parameter_names: [str], parameter_values: [[double]])
    '''
    Sets values for element parameters.  Every parameter name must already
    be in the file (declare them via getExodusCopy(..., parameter_names=names)).

    :param exoObj: Exodus file object.
    :param parameter_names: Ex: ["VP_0","VP_1","VP_2","fluid"]
    :param parameter_values: Ex: For 3-element mesh "[[4,4,4],[4,4,4],[4,4,4],[0,0,0]]" (fluid=0 is elastic)
    :raises Exception: when any requested name is missing from the file.
    '''
    BLOCK_ID = 1
    TIMESTEP_ZERO = 1  # exodus time steps are 1-based; parameters live on step 1
    # Robustness fix: validate *every* requested name up front, not just the first.
    available_names = exoObj.get_element_variable_names()
    for parameter_name in parameter_names:
        if parameter_name not in available_names:
            raise Exception("ERROR: parameter name {} not in exodus file (has: {}) -- add it using getExodusCopy(,,parameter_names=names)!".format(parameter_name, available_names))
    for parameter_name, parameter_value in zip(parameter_names, parameter_values):
        exoObj.put_element_variable_values(BLOCK_ID, parameter_name, TIMESTEP_ZERO, parameter_value)
from __future__ import absolute_import
import os
import time
# Integration tests are opt-in: they only run when the INTEGRATION_TESTS
# environment variable is set (any non-empty value is truthy).
INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False)
if INTEGRATION_TESTS:
    # docker-compose's Python API is only needed (and may only be installed)
    # when the integration tests are actually enabled.
    from compose.cli.command import get_project
    from compose.service import BuildAction
class ComposeMixin(object):
    """
    Manage docker-compose to ensure that needed services are running during tests.

    Subclasses list required services in ``COMPOSE_SERVICES`` and point
    ``COMPOSE_PROJECT_DIR`` at the directory holding ``docker-compose.yml``.
    All methods are no-ops unless the ``INTEGRATION_TESTS`` environment
    variable is set and at least one service is declared.
    """
    # List of required services to run INTEGRATION_TESTS
    COMPOSE_SERVICES = []
    # docker-compose.yml dir path
    COMPOSE_PROJECT_DIR = '.'
    # timeout waiting for health (seconds)
    COMPOSE_TIMEOUT = 60

    @classmethod
    def compose_up(cls):
        """
        Ensure *only* the services defined under `COMPOSE_SERVICES` are running and healthy.

        Raises an Exception if the services do not all report a 'healthy'
        Docker health status within COMPOSE_TIMEOUT seconds.
        """
        if INTEGRATION_TESTS and cls.COMPOSE_SERVICES:
            cls.compose_project().up(
                service_names=cls.COMPOSE_SERVICES,
                do_build=BuildAction.force,
                timeout=30)

            # Poll each required container's health until all are healthy or
            # the timeout budget is spent (one probe per second).
            healthy = False
            seconds = cls.COMPOSE_TIMEOUT
            while not healthy and seconds > 0:
                # Fix: the original used '"Seconds: %d".format(seconds)', which
                # mixed %-style with str.format and printed the literal text.
                print("Seconds: %d" % seconds)
                seconds -= 1
                time.sleep(1)
                healthy = True
                # NOTE(review): assumes every service defines a healthcheck;
                # inspect() without ['State']['Health'] would raise KeyError.
                for container in cls.compose_project().containers(service_names=cls.COMPOSE_SERVICES):
                    if container.inspect()['State']['Health']['Status'] != 'healthy':
                        healthy = False
                        break

            if not healthy:
                raise Exception('Timeout while waiting for healthy docker-compose services')

    @classmethod
    def compose_down(cls):
        """
        Stop all running containers
        """
        if INTEGRATION_TESTS and cls.COMPOSE_SERVICES:
            cls.compose_project().kill(service_names=cls.COMPOSE_SERVICES)

    @classmethod
    def compose_project(cls):
        # Project name comes from DOCKER_COMPOSE_PROJECT_NAME when set
        # (None lets compose derive it from the directory name).
        return get_project(cls.COMPOSE_PROJECT_DIR, project_name=os.environ.get('DOCKER_COMPOSE_PROJECT_NAME'))
| roncohen/apm-server | _beats/libbeat/tests/system/beat/compose.py | Python | apache-2.0 | 2,043 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Make Post.attachment_type a nullable, blankable 10-char choice field."""

    dependencies = [
        ('post', '0006_auto_20150713_1356'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='attachment_type',
            field=models.CharField(
                max_length=10,
                blank=True,
                null=True,
                choices=[
                    ('image', 'Image'),
                    ('video', 'Video'),
                    ('file', 'File'),
                    ('audio', 'Audio'),
                ],
            ),
        ),
    ]
| 28harishkumar/Social-website-django | post/migrations/0007_auto_20150713_1410.py | Python | mit | 517 |
import clr, sys
clr.AddReference('ZyGames.Framework.Common');
clr.AddReference('ZyGames.Framework');
clr.AddReference('ZyGames.Framework.Game');
clr.AddReference('ZyGames.Tianjiexing.Model');
clr.AddReference('ZyGames.Tianjiexing.BLL');
clr.AddReference('ZyGames.Tianjiexing.Lang');
clr.AddReference('ZyGames.Tianjiexing.Component');
from action import *
from System import *
from random import *
from lang import Lang
from System.Collections.Generic import *
from ZyGames.Framework.Common.Log import *
from ZyGames.Tianjiexing.Model.ConfigModel import *
from ZyGames.Framework.Common import *
from ZyGames.Tianjiexing.Model import *
from ZyGames.Tianjiexing.BLL import *
from ZyGames.Tianjiexing.Lang import *
from ZyGames.Framework.Game.Cache import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Framework.Game.Runtime import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Tianjiexing.Model.Config import *
from ZyGames.Tianjiexing.Model.Enum import *
from ZyGames.Tianjiexing.BLL.Base import *
from ZyGames.Tianjiexing.Component import *
from ZyGames.Framework.Common.Serialization import *
from ZyGames.Tianjiexing.BLL.Base import *
# Treasure-draw interface (取宝接口)
class UrlParam(HttpParam):
    """Parsed request parameters for the treasure-draw action."""
    def __init__(self):
        HttpParam.__init__(self)
        # Requested treasure pool; parsed into a LairTreasureType enum value
        # by getUrlElement (0 until parsed).
        self.LairTreasureType = 0
class ActionResult(DataResult):
    """Response payload for the treasure-draw action (serialized by buildPacket)."""
    def __init__(self):
        DataResult.__init__(self)
        # Grid slot/position of the drawn reward (set from lair.LairPosition).
        self.position=0
        # Reward name shown to the client (localized string or item name).
        self.LairRewardName=""
        # Quantity awarded.
        self.LairRewardNum=0
        # Icon/head resource id of the reward.
        self.LairRewardHead=""
        self.ID=0
        # Flag for item rewards; initialized to 0 here (not set in takeAction).
        self.IsItem=0
        # Item details when the reward is an item (LairRewardType.WuPing), else None.
        self.ItemInfo = None
        # LairRewardType enum value of the drawn reward.
        self.LairRewardType = None
def getUrlElement(httpGet, parent):
    """Parse the HTTP request into a UrlParam; mark it invalid when the
    required LairTreasureType parameter is absent."""
    parsed = UrlParam()
    if not httpGet.Contains("LairTreasureType"):
        # Required parameter missing -> flag the request as bad.
        parsed.Result = False
        return parsed
    parsed.LairTreasureType = httpGet.GetEnum[LairTreasureType]("LairTreasureType")
    return parsed
def takeAction(urlParam, parent):
    """Execute one treasure draw: reset daily counters, validate the draw
    limit and cost, pick a reward, grant it, bump the counter and charge."""
    actionResult = ActionResult();
    # (original note, translated) needs implementation
    contextUser = parent.Current.User
    userExtend = contextUser.UserExtend
    # Reset each tier's daily draw counter when its date has rolled over.
    if(userExtend.LairDate.Date != DateTime.Now.Date):
        userExtend.LairNum = 0
        userExtend.LairDate = DateTime.Now
    if(userExtend.DaLairDate.Date != DateTime.Now.Date):
        userExtend.DaLairNum = 0
        userExtend.DaLairDate = DateTime.Now
    if(userExtend.ZhongLairDate.Date != DateTime.Now.Date):
        userExtend.ZhongLairNum = 0
        userExtend.ZhongLairDate = DateTime.Now
    # Look up the config entry for the requested treasure pool.
    LairTreasure=ConfigCacheSet[LairTreasureInfo]().FindKey(MathUtils.ToInt(urlParam.LairTreasureType))
    if LairTreasure == None:
        parent.ErrorCode = Lang.getLang("ErrorCode");
        parent.ErrorInfo = Lang.getLang("LoadError");
        actionResult.Result = False;
        return actionResult;
    # Pick the per-tier counter that applies to this draw.
    maxNum = LairTreasure.LairTreasureNum
    lairNum = 0
    if urlParam.LairTreasureType == LairTreasureType.ZhuanJiaQuBao:
        #maxNum = ConfigEnvSet.GetInt("UserLair.Num")
        lairNum = userExtend.LairNum
    if urlParam.LairTreasureType == LairTreasureType.DaShiQuBao:
        #maxNum = ConfigEnvSet.GetInt("UserDaLair.Num")
        lairNum = userExtend.DaLairNum
    if urlParam.LairTreasureType == LairTreasureType.ZongShiQuBao:
        #maxNum = ConfigEnvSet.GetInt("UserZhongLair.Num")
        lairNum = userExtend.ZhongLairNum
    # Daily draw limit reached for this tier.
    if(lairNum >= maxNum):
        parent.ErrorCode = Lang.getLang("ErrorCode");
        parent.ErrorInfo = Lang.getLang("St12102_LairNumNot");
        actionResult.Result = False;
        return actionResult;
    # Affordability checks: UseType 0 charges game coins, UseType 1 gold.
    if LairTreasure.UseType==0:
        if contextUser.GameCoin<LairTreasure.UseNum:
            parent.ErrorCode = Lang.getLang("ErrorCode");
            parent.ErrorInfo = Lang.getLang("St12102_GameCoinNotEnough");
            actionResult.Result = False;
            return actionResult;
    if LairTreasure.UseType==1:
        if contextUser.GoldNum<LairTreasure.UseNum:
            parent.ErrorCode = 1;
            parent.ErrorInfo = Lang.getLang("St12102_PayGoldNotEnough");
            actionResult.Result = False;
            return actionResult;
    #index = RandomUtils.GetRandom(0,LairTreasure.LairTreasureList.Count)
    #count =LairTreasure.LairTreasureList.Count
    #precent = []
    #for lairTreasure in LairTreasure.LairTreasureList:
    # precent.append(lairTreasure.Probability*1000)
    # Probability-weighted pick of the reward index.
    index = LairTreasuerHelp.GetLaiRewardIndex(LairTreasure.LairTreasureList.ToList())
    #index = 0
    #actionResult.postion=LairTreasuerHelp.ChestLairTreasuerPosition(urlParam.LairTreasureType)
    #LairTreasure=ConfigCacheSet[LairTreasureInfo]().FindKey(MathUtils.ToInt(urlParam.LairTreasureType))
    lair=LairTreasure.LairTreasureList[index]
    actionResult.position = lair.LairPosition;
    actionResult.LairRewardType = lair.LairRewardType
    # Grant the reward: gold, game coins, or a physical item (WuPing).
    if(lair.LairRewardType == LairRewardType.Gold):
        contextUser.GiftGold = MathUtils.Addition(contextUser.GiftGold,lair.Num)
        actionResult.LairRewardName = Lang.getLang("Gold")
        actionResult.LairRewardNum = lair.Num
        actionResult.LairRewardHead = lair.HeadID
        actionResult.ID = 0
    else:
        if(lair.LairRewardType == LairRewardType.GameGoin):
            contextUser.GameCoin = MathUtils.Addition(contextUser.GameCoin,lair.Num)
            actionResult.LairRewardName = Lang.getLang("GameGoin")
            actionResult.LairRewardNum = lair.Num
            actionResult.LairRewardHead = lair.HeadID
            actionResult.ID = 0
        else:
            if(lair.LairRewardType == LairRewardType.WuPing):
                actionResult.ItemInfo=LairTreasuerHelp.ShowLairReward(lair , contextUser,urlParam.LairTreasureType)
                if(actionResult.ItemInfo==None):
                    parent.ErrorCode = Lang.getLang("ErrorCode")
                    parent.ErrorInfo = Lang.getLang("LoadError")
                    actionResult.Result = False
                    return actionResult
                actionResult.LairRewardName = actionResult.ItemInfo.ItemName
                actionResult.LairRewardNum = lair.Num
                actionResult.LairRewardHead = actionResult.ItemInfo.HeadID;
                actionResult.ID = 0
    # Bump the per-tier daily counter now that the draw succeeded.
    if urlParam.LairTreasureType == LairTreasureType.ZhuanJiaQuBao:
        contextUser.UserExtend.LairNum = MathUtils.Addition(contextUser.UserExtend.LairNum,1)
    if urlParam.LairTreasureType == LairTreasureType.DaShiQuBao:
        contextUser.UserExtend.DaLairNum = MathUtils.Addition(contextUser.UserExtend.DaLairNum,1)
    if urlParam.LairTreasureType == LairTreasureType.ZongShiQuBao:
        contextUser.UserExtend.ZhongLairNum = MathUtils.Addition(contextUser.UserExtend.ZhongLairNum,1)
    contextUser.UserExtend.LairDate = DateTime.Now
    # Charge the fee: game coins are subtracted, gold is accumulated as UseGold.
    if LairTreasure.UseType==0:
        contextUser.GameCoin = MathUtils.Subtraction(contextUser.GameCoin,LairTreasure.UseNum)
    else:
        contextUser.UseGold = MathUtils.Addition(contextUser.UseGold,LairTreasure.UseNum);
    return actionResult;
def buildPacket(writer, urlParam, actionResult):
    """Serialize the draw result onto the response stack. Push order is the
    wire format and must not change."""
    values = (
        actionResult.position,
        actionResult.LairRewardName,
        actionResult.LairRewardNum,
        MathUtils.ToNotNullString(actionResult.LairRewardHead),
        MathUtils.ToInt(actionResult.LairRewardType),
        MathUtils.ToInt(actionResult.ID),
    )
    for value in values:
        writer.PushIntoStack(value)
    return True
| wenhulove333/ScutServer | Sample/Koudai/Server/src/ZyGames.Tianjiexing.Server/Script/PyScript/Action/action12102.py | Python | mit | 7,626 |
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Lenient pybindgen error handler: report failures as warnings and keep going."""
    def handle_error(self, wrapper, exception, traceback_):
        # Returning True tells pybindgen the error is handled, so code
        # generation continues past the failing wrapper.
        message = "exception %r in wrapper %s" % (exception, wrapper)
        warnings.warn(message)
        return True
# Install the lenient handler so binding generation survives individual
# wrapper failures instead of aborting.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen Module for the ns-3 antenna bindings."""
    return Module('ns.antenna', cpp_namespace='::ns3')
def register_types(module):
    """Auto-generated by the ns-3 binding generator: declare every wrapped C++
    class, enum, typedef and container for the antenna module. Do not edit by
    hand; regenerate instead."""
    root_module = module.get_root()
    ## log.h (module 'core'): ns3::LogLevel [enumeration]
    module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'], import_from_module='ns.core')
    ## angles.h (module 'antenna'): ns3::Angles [class]
    module.add_class('Angles')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator', 'ns3::AttributeConstructionList::CIterator')
    typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator*', 'ns3::AttributeConstructionList::CIterator*')
    typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator&', 'ns3::AttributeConstructionList::CIterator&')
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
    ## hash.h (module 'core'): ns3::Hasher [class]
    module.add_class('Hasher', import_from_module='ns.core')
    ## log.h (module 'core'): ns3::LogComponent [class]
    module.add_class('LogComponent', import_from_module='ns.core')
    typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >', 'ns3::LogComponent::ComponentList')
    typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >*', 'ns3::LogComponent::ComponentList*')
    typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >&', 'ns3::LogComponent::ComponentList&')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## log.h (module 'core'): ns3::ParameterLogger [class]
    module.add_class('ParameterLogger', import_from_module='ns.core')
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::ObjectBase'], template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'])
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
    module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    typehandlers.add_type_alias('uint32_t', 'ns3::TypeId::hash_t')
    typehandlers.add_type_alias('uint32_t*', 'ns3::TypeId::hash_t*')
    typehandlers.add_type_alias('uint32_t&', 'ns3::TypeId::hash_t&')
    ## vector.h (module 'core'): ns3::Vector2D [class]
    module.add_class('Vector2D', import_from_module='ns.core')
    ## vector.h (module 'core'): ns3::Vector3D [class]
    module.add_class('Vector3D', import_from_module='ns.core')
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## phased-array-model.h (module 'antenna'): ns3::PhasedArrayModel [class]
    module.add_class('PhasedArrayModel', parent=root_module['ns3::Object'])
    typehandlers.add_type_alias('std::vector< std::complex< double > >', 'ns3::PhasedArrayModel::ComplexVector')
    typehandlers.add_type_alias('std::vector< std::complex< double > >*', 'ns3::PhasedArrayModel::ComplexVector*')
    typehandlers.add_type_alias('std::vector< std::complex< double > >&', 'ns3::PhasedArrayModel::ComplexVector&')
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## uniform-planar-array.h (module 'antenna'): ns3::UniformPlanarArray [class]
    module.add_class('UniformPlanarArray', parent=root_module['ns3::PhasedArrayModel'])
    ## antenna-model.h (module 'antenna'): ns3::AntennaModel [class]
    module.add_class('AntennaModel', parent=root_module['ns3::Object'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel [class]
    module.add_class('CosineAntennaModel', parent=root_module['ns3::AntennaModel'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
    module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
    module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel [class]
    module.add_class('IsotropicAntennaModel', parent=root_module['ns3::AntennaModel'])
    ## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel [class]
    module.add_class('ParabolicAntennaModel', parent=root_module['ns3::AntennaModel'])
    ## three-gpp-antenna-model.h (module 'antenna'): ns3::ThreeGppAntennaModel [class]
    module.add_class('ThreeGppAntennaModel', parent=root_module['ns3::AntennaModel'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## vector.h (module 'core'): ns3::Vector2DChecker [class]
    module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## vector.h (module 'core'): ns3::Vector2DValue [class]
    module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## vector.h (module 'core'): ns3::Vector3DChecker [class]
    module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## vector.h (module 'core'): ns3::Vector3DValue [class]
    module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
    module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
    module.add_container('std::vector< double >', 'double', container_type='vector')
    module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type='map')
    module.add_container('std::vector< std::complex< double > >', 'std::complex< double >', container_type='vector')
    module.add_container('ns3::PhasedArrayModel::ComplexVector', 'std::complex< double >', container_type='vector')
    typehandlers.add_type_alias('ns3::Vector3D', 'ns3::Vector')
    typehandlers.add_type_alias('ns3::Vector3D*', 'ns3::Vector*')
    typehandlers.add_type_alias('ns3::Vector3D&', 'ns3::Vector&')
    module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
    typehandlers.add_type_alias('ns3::Vector3DValue', 'ns3::VectorValue')
    typehandlers.add_type_alias('ns3::Vector3DValue*', 'ns3::VectorValue*')
    typehandlers.add_type_alias('ns3::Vector3DValue&', 'ns3::VectorValue&')
    module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
    typehandlers.add_type_alias('ns3::Vector3DChecker', 'ns3::VectorChecker')
    typehandlers.add_type_alias('ns3::Vector3DChecker*', 'ns3::VectorChecker*')
    typehandlers.add_type_alias('ns3::Vector3DChecker&', 'ns3::VectorChecker&')
    module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::TimePrinter')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::TimePrinter*')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::TimePrinter&')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::NodePrinter')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::NodePrinter*')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::NodePrinter&')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
    """Auto-generated: register types for the ns3::FatalImpl namespace (none wrapped)."""
    root_module = module.get_root()
def register_types_ns3_Hash(module):
    """Auto-generated: register types, typedefs and the nested Function namespace
    for ns3::Hash."""
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
    """Auto-generated: register the concrete hash-function implementations in
    ns3::Hash::Function."""
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
    """Auto-generated: dispatch to one per-class registration function for every
    wrapped type. Do not edit by hand; regenerate instead."""
    register_Ns3Angles_methods(root_module, root_module['ns3::Angles'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
    register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
    register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
    register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
    register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
    register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ParameterLogger_methods(root_module, root_module['ns3::ParameterLogger'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
    register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3PhasedArrayModel_methods(root_module, root_module['ns3::PhasedArrayModel'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3UniformPlanarArray_methods(root_module, root_module['ns3::UniformPlanarArray'])
    register_Ns3AntennaModel_methods(root_module, root_module['ns3::AntennaModel'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3CosineAntennaModel_methods(root_module, root_module['ns3::CosineAntennaModel'])
    register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
    register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3IsotropicAntennaModel_methods(root_module, root_module['ns3::IsotropicAntennaModel'])
    register_Ns3ParabolicAntennaModel_methods(root_module, root_module['ns3::ParabolicAntennaModel'])
    register_Ns3ThreeGppAntennaModel_methods(root_module, root_module['ns3::ThreeGppAntennaModel'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
    register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
    register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
    register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
    register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def register_Ns3Angles_methods(root_module, cls):
    """Register the Python wrapper methods for ns3::Angles (angles.h, module 'antenna').

    Auto-generated PyBindGen registration: binds constructors, the
    azimuth/inclination accessors, and the static m_printDeg flag.
    """
    cls.add_output_stream_operator()
    ## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Angles const & arg0) [constructor]
    cls.add_constructor([param('ns3::Angles const &', 'arg0')])
    ## angles.h (module 'antenna'): ns3::Angles::Angles(double azimuth, double inclination) [constructor]
    cls.add_constructor([param('double', 'azimuth'), param('double', 'inclination')])
    ## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v) [constructor]
    cls.add_constructor([param('ns3::Vector', 'v')])
    ## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v, ns3::Vector o) [constructor]
    cls.add_constructor([param('ns3::Vector', 'v'), param('ns3::Vector', 'o')])
    ## angles.h (module 'antenna'): double ns3::Angles::GetAzimuth() const [member function]
    cls.add_method('GetAzimuth',
                   'double',
                   [],
                   is_const=True)
    ## angles.h (module 'antenna'): double ns3::Angles::GetInclination() const [member function]
    cls.add_method('GetInclination',
                   'double',
                   [],
                   is_const=True)
    ## angles.h (module 'antenna'): void ns3::Angles::SetAzimuth(double azimuth) [member function]
    cls.add_method('SetAzimuth',
                   'void',
                   [param('double', 'azimuth')])
    ## angles.h (module 'antenna'): void ns3::Angles::SetInclination(double inclination) [member function]
    cls.add_method('SetInclination',
                   'void',
                   [param('double', 'inclination')])
    ## angles.h (module 'antenna'): ns3::Angles::m_printDeg [variable]
    cls.add_static_attribute('m_printDeg', 'bool', is_const=False)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register the Python wrapper methods for ns3::AttributeConstructionList
    (attribute-construction-list.h, module 'core'): constructors, Add,
    Begin/End iteration, and Find.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'ns3::AttributeConstructionList::CIterator',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'ns3::AttributeConstructionList::CIterator',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Register the Python wrapper for the inner struct
    ns3::AttributeConstructionList::Item: constructors plus the
    checker/name/value public fields.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register the Python wrapper methods for ns3::CallbackBase (callback.h,
    module 'core'): public constructors, GetImpl, and the protected
    implementation-pointer constructor.
    """
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl',
                   'ns3::Ptr< ns3::CallbackImplBase >',
                   [],
                   is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    return
def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
    """Register ns3::DefaultDeleter<ns3::AttributeAccessor> (default-deleter.h,
    module 'core'): default/copy constructors and the static Delete() hook.
    """
    target = 'ns3::AttributeAccessor'
    # default constructor, then copy constructor
    cls.add_constructor([])
    cls.add_constructor(
        [param('ns3::DefaultDeleter< %s > const &' % target, 'arg0')])
    # static void Delete(ns3::AttributeAccessor *object)
    cls.add_method('Delete', 'void',
                   [param('%s *' % target, 'object')],
                   is_static=True)
    return
def register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, cls):
    """Register ns3::DefaultDeleter<ns3::AttributeChecker> (default-deleter.h,
    module 'core'): default/copy constructors and the static Delete() hook.
    """
    target = 'ns3::AttributeChecker'
    # default constructor, then copy constructor
    cls.add_constructor([])
    cls.add_constructor(
        [param('ns3::DefaultDeleter< %s > const &' % target, 'arg0')])
    # static void Delete(ns3::AttributeChecker *object)
    cls.add_method('Delete', 'void',
                   [param('%s *' % target, 'object')],
                   is_static=True)
    return
def register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, cls):
    """Register ns3::DefaultDeleter<ns3::AttributeValue> (default-deleter.h,
    module 'core'): default/copy constructors and the static Delete() hook.
    """
    target = 'ns3::AttributeValue'
    # default constructor, then copy constructor
    cls.add_constructor([])
    cls.add_constructor(
        [param('ns3::DefaultDeleter< %s > const &' % target, 'arg0')])
    # static void Delete(ns3::AttributeValue *object)
    cls.add_method('Delete', 'void',
                   [param('%s *' % target, 'object')],
                   is_static=True)
    return
def register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, cls):
    """Register ns3::DefaultDeleter<ns3::CallbackImplBase> (default-deleter.h,
    module 'core'): default/copy constructors and the static Delete() hook.
    """
    target = 'ns3::CallbackImplBase'
    # default constructor, then copy constructor
    cls.add_constructor([])
    cls.add_constructor(
        [param('ns3::DefaultDeleter< %s > const &' % target, 'arg0')])
    # static void Delete(ns3::CallbackImplBase *object)
    cls.add_method('Delete', 'void',
                   [param('%s *' % target, 'object')],
                   is_static=True)
    return
def register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, cls):
    """Register ns3::DefaultDeleter<ns3::Hash::Implementation>
    (default-deleter.h, module 'core'): default/copy constructors and the
    static Delete() hook.
    """
    target = 'ns3::Hash::Implementation'
    # default constructor, then copy constructor
    cls.add_constructor([])
    cls.add_constructor(
        [param('ns3::DefaultDeleter< %s > const &' % target, 'arg0')])
    # static void Delete(ns3::Hash::Implementation *object)
    cls.add_method('Delete', 'void',
                   [param('%s *' % target, 'object')],
                   is_static=True)
    return
def register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register ns3::DefaultDeleter<ns3::TraceSourceAccessor>
    (default-deleter.h, module 'core'): default/copy constructors and the
    static Delete() hook.
    """
    target = 'ns3::TraceSourceAccessor'
    # default constructor, then copy constructor
    cls.add_constructor([])
    cls.add_constructor(
        [param('ns3::DefaultDeleter< %s > const &' % target, 'arg0')])
    # static void Delete(ns3::TraceSourceAccessor *object)
    cls.add_method('Delete', 'void',
                   [param('%s *' % target, 'object')],
                   is_static=True)
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Register the Python wrapper methods for ns3::Hasher (hash.h, module
    'core'): constructors, the GetHash32/GetHash64 overloads (buffer+size and
    std::string variants), and clear().
    """
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
    cls.add_constructor([])
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
    cls.add_method('clear',
                   'ns3::Hasher &',
                   [])
    return
def register_Ns3LogComponent_methods(root_module, cls):
    """Register the Python wrapper methods for ns3::LogComponent (log.h,
    module 'core'): constructors, level enable/disable, file/name queries,
    the static component list and level-label helpers, and SetMask.
    """
    ## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [constructor]
    cls.add_constructor([param('ns3::LogComponent const &', 'arg0')])
    ## log.h (module 'core'): ns3::LogComponent::LogComponent(std::string const & name, std::string const & file, ns3::LogLevel const mask=::ns3::LogLevel::LOG_NONE) [constructor]
    cls.add_constructor([param('std::string const &', 'name'), param('std::string const &', 'file'), param('ns3::LogLevel const', 'mask', default_value='::ns3::LogLevel::LOG_NONE')])
    ## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel const level) [member function]
    cls.add_method('Disable',
                   'void',
                   [param('ns3::LogLevel const', 'level')])
    ## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel const level) [member function]
    cls.add_method('Enable',
                   'void',
                   [param('ns3::LogLevel const', 'level')])
    ## log.h (module 'core'): std::string ns3::LogComponent::File() const [member function]
    cls.add_method('File',
                   'std::string',
                   [],
                   is_const=True)
    ## log.h (module 'core'): static ns3::LogComponent::ComponentList * ns3::LogComponent::GetComponentList() [member function]
    cls.add_method('GetComponentList',
                   'ns3::LogComponent::ComponentList *',
                   [],
                   is_static=True)
    ## log.h (module 'core'): static std::string ns3::LogComponent::GetLevelLabel(ns3::LogLevel const level) [member function]
    cls.add_method('GetLevelLabel',
                   'std::string',
                   [param('ns3::LogLevel const', 'level')],
                   is_static=True)
    ## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel const level) const [member function]
    cls.add_method('IsEnabled',
                   'bool',
                   [param('ns3::LogLevel const', 'level')],
                   is_const=True)
    ## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function]
    cls.add_method('IsNoneEnabled',
                   'bool',
                   [],
                   is_const=True)
    ## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function]
    cls.add_method('Name',
                   'char const *',
                   [],
                   is_const=True)
    ## log.h (module 'core'): void ns3::LogComponent::SetMask(ns3::LogLevel const level) [member function]
    cls.add_method('SetMask',
                   'void',
                   [param('ns3::LogLevel const', 'level')])
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register the Python wrapper methods for ns3::ObjectBase (object-base.h,
    module 'core'): constructors, attribute get/set (plus fail-safe variants),
    trace connect/disconnect, and the protected construction hooks.
    """
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   is_virtual=True, visibility='protected')
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register ns3::ObjectDeleter (object.h, module 'core'): default/copy
    constructors and the static Delete() hook for ns3::Object pointers.
    """
    # default constructor, then copy constructor
    for ctor_args in ([], [param('ns3::ObjectDeleter const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    # static void Delete(ns3::Object *object)
    cls.add_method('Delete', 'void',
                   [param('ns3::Object *', 'object')],
                   is_static=True)
    return
def register_Ns3ParameterLogger_methods(root_module, cls):
    """Register ns3::ParameterLogger (log.h, module 'core'): the copy
    constructor and the std::ostream-based constructor.
    """
    for ctor_args in (
            [param('ns3::ParameterLogger const &', 'arg0')],
            [param('std::ostream &', 'os')],
    ):
        cls.add_constructor(ctor_args)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase,
    ns3::ObjectDeleter> (simple-ref-count.h, module 'core'): default and copy
    constructors only.
    """
    instantiation = 'ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(instantiation + ' const &', 'o')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register the Python wrapper methods for ns3::TypeId (type-id.h, module
    'core'): comparison/stream operators, constructors, attribute and trace
    source registration (AddAttribute/AddTraceSource), the Get*/Lookup*
    introspection API, and the Set* configuration methods.
    """
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(std::size_t i) const [member function]
    cls.add_method('GetAttribute',
                   'ns3::TypeId::AttributeInformation',
                   [param('std::size_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(std::size_t i) const [member function]
    cls.add_method('GetAttributeFullName',
                   'std::string',
                   [param('std::size_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN',
                   'std::size_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor',
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::hash_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash',
                   'ns3::TypeId::hash_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint16_t i) [member function]
    cls.add_method('GetRegistered',
                   'ns3::TypeId',
                   [param('uint16_t', 'i')],
                   is_static=True)
    ## type-id.h (module 'core'): static uint16_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN',
                   'uint16_t',
                   [],
                   is_static=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
    cls.add_method('GetSize',
                   'std::size_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(std::size_t i) const [member function]
    cls.add_method('GetTraceSource',
                   'ns3::TypeId::TraceSourceInformation',
                   [param('std::size_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN',
                   'std::size_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation',
                   'ns3::TypeId',
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf',
                   'bool',
                   [param('ns3::TypeId', 'other')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName',
                   'bool',
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(ns3::TypeId::hash_t hash) [member function]
    cls.add_method('LookupByHash',
                   'ns3::TypeId',
                   [param('uint32_t', 'hash')],
                   is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(ns3::TypeId::hash_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe',
                   'bool',
                   [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
                   is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName',
                   'ns3::TypeId',
                   [param('std::string', 'name')],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name')],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(std::size_t i, ns3::Ptr<const ns3::AttributeValue> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue',
                   'bool',
                   [param('std::size_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName',
                   'ns3::TypeId',
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent',
                   'ns3::TypeId',
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
    cls.add_method('SetSize',
                   'ns3::TypeId',
                   [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
    cls.add_method('SetUid',
                   'void',
                   [param('uint16_t', 'uid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register the Python wrapper for the inner struct
    ns3::TypeId::AttributeInformation: constructors plus its public data
    fields (accessor, checker, flags, help, initialValue, name,
    originalInitialValue, supportLevel, supportMsg).
    """
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
    cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
    cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register the Python wrapper for the inner struct
    ns3::TypeId::TraceSourceInformation: constructors plus its public data
    fields (accessor, callback, help, name, supportLevel, supportMsg).
    """
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
    cls.add_instance_attribute('callback', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
    cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
    cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
    return
def register_Ns3Vector2D_methods(root_module, cls):
    """Register the Python wrapper methods for ns3::Vector2D (vector.h, module
    'core'): arithmetic/comparison/stream operators, constructors, length
    accessors, and the x/y fields.
    """
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<')
    cls.add_binary_numeric_operator('-', root_module['ns3::Vector2D'], root_module['ns3::Vector2D'], param('ns3::Vector2D const &', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Vector2D'], root_module['ns3::Vector2D'], param('ns3::Vector2D const &', 'right'))
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('<=')
    ## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [constructor]
    cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
    cls.add_constructor([param('double', '_x'), param('double', '_y')])
    ## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): double ns3::Vector2D::GetLength() const [member function]
    cls.add_method('GetLength',
                   'double',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): double ns3::Vector2D::GetLengthSquared() const [member function]
    cls.add_method('GetLengthSquared',
                   'double',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): ns3::Vector2D::x [variable]
    cls.add_instance_attribute('x', 'double', is_const=False)
    ## vector.h (module 'core'): ns3::Vector2D::y [variable]
    cls.add_instance_attribute('y', 'double', is_const=False)
    return
def register_Ns3Vector3D_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Vector3D: comparison/arithmetic
    operators, constructors, length helpers, and the x/y/z attributes.
    Generated code — keep call order intact.
    """
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<')
    # Element-wise vector subtraction/addition returning a new Vector3D.
    cls.add_binary_numeric_operator('-', root_module['ns3::Vector3D'], root_module['ns3::Vector3D'], param('ns3::Vector3D const &', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Vector3D'], root_module['ns3::Vector3D'], param('ns3::Vector3D const &', 'right'))
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('<=')
    ## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [constructor]
    cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
    cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
    ## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): double ns3::Vector3D::GetLength() const [member function]
    cls.add_method('GetLength',
                   'double',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): double ns3::Vector3D::GetLengthSquared() const [member function]
    cls.add_method('GetLengthSquared',
                   'double',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): ns3::Vector3D::x [variable]
    cls.add_instance_attribute('x', 'double', is_const=False)
    ## vector.h (module 'core'): ns3::Vector3D::y [variable]
    cls.add_instance_attribute('y', 'double', is_const=False)
    ## vector.h (module 'core'): ns3::Vector3D::z [variable]
    cls.add_instance_attribute('z', 'double', is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register pybindgen bindings for ns3::empty (default and copy constructors only)."""
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Object: lifecycle (Initialize/
    Dispose), aggregation (AggregateObject/GetAggregateIterator/GetObject),
    and the protected virtual hooks (DoDispose/DoInitialize/NotifyNewAggregate).
    Generated code — keep call order intact.
    """
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject',
                   'void',
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator',
                   'ns3::Object::AggregateIterator',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    # The two GetObject overloads are template methods; both are instantiated
    # for ns3::Object and exposed under the same Python name 'GetObject'.
    ## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject() const [member function]
    cls.add_method('GetObject',
                   'ns3::Ptr< ns3::Object >',
                   [],
                   custom_template_method_name='GetObject', is_const=True, template_parameters=['ns3::Object'])
    ## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject(ns3::TypeId tid) const [member function]
    cls.add_method('GetObject',
                   'ns3::Ptr< ns3::Object >',
                   [param('ns3::TypeId', 'tid')],
                   custom_template_method_name='GetObject', is_const=True, template_parameters=['ns3::Object'])
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize',
                   'void',
                   [])
    ## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
    cls.add_method('IsInitialized',
                   'bool',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True, visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   is_virtual=True, visibility='protected')
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate',
                   'void',
                   [],
                   is_virtual=True, visibility='protected')
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Object::AggregateIterator
    (constructors plus the HasNext/Next iteration pair).
    """
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::Ptr<const ns3::Object> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::Ptr< ns3::Object const >',
                   [])
    return
def register_Ns3PhasedArrayModel_methods(root_module, cls):
    """Register pybindgen bindings for the abstract ns3::PhasedArrayModel:
    antenna-element access, beamforming/steering vector accessors, and the
    pure-virtual geometry queries implemented by concrete arrays
    (e.g. UniformPlanarArray). Generated code — keep call order intact.
    """
    ## phased-array-model.h (module 'antenna'): ns3::PhasedArrayModel::PhasedArrayModel(ns3::PhasedArrayModel const & arg0) [constructor]
    cls.add_constructor([param('ns3::PhasedArrayModel const &', 'arg0')])
    ## phased-array-model.h (module 'antenna'): ns3::PhasedArrayModel::PhasedArrayModel() [constructor]
    cls.add_constructor([])
    ## phased-array-model.h (module 'antenna'): ns3::Ptr<const ns3::AntennaModel> ns3::PhasedArrayModel::GetAntennaElement() const [member function]
    cls.add_method('GetAntennaElement',
                   'ns3::Ptr< ns3::AntennaModel const >',
                   [],
                   is_const=True)
    ## phased-array-model.h (module 'antenna'): ns3::PhasedArrayModel::ComplexVector ns3::PhasedArrayModel::GetBeamformingVector() const [member function]
    cls.add_method('GetBeamformingVector',
                   'ns3::PhasedArrayModel::ComplexVector',
                   [],
                   is_const=True)
    ## phased-array-model.h (module 'antenna'): ns3::PhasedArrayModel::ComplexVector ns3::PhasedArrayModel::GetBeamformingVector(ns3::Angles a) const [member function]
    cls.add_method('GetBeamformingVector',
                   'ns3::PhasedArrayModel::ComplexVector',
                   [param('ns3::Angles', 'a')],
                   is_const=True)
    ## phased-array-model.h (module 'antenna'): std::pair<double, double> ns3::PhasedArrayModel::GetElementFieldPattern(ns3::Angles a) const [member function]
    cls.add_method('GetElementFieldPattern',
                   'std::pair< double, double >',
                   [param('ns3::Angles', 'a')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## phased-array-model.h (module 'antenna'): ns3::Vector ns3::PhasedArrayModel::GetElementLocation(uint64_t index) const [member function]
    cls.add_method('GetElementLocation',
                   'ns3::Vector',
                   [param('uint64_t', 'index')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## phased-array-model.h (module 'antenna'): uint32_t ns3::PhasedArrayModel::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## phased-array-model.h (module 'antenna'): uint64_t ns3::PhasedArrayModel::GetNumberOfElements() const [member function]
    cls.add_method('GetNumberOfElements',
                   'uint64_t',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## phased-array-model.h (module 'antenna'): ns3::PhasedArrayModel::ComplexVector ns3::PhasedArrayModel::GetSteeringVector(ns3::Angles a) const [member function]
    cls.add_method('GetSteeringVector',
                   'ns3::PhasedArrayModel::ComplexVector',
                   [param('ns3::Angles', 'a')],
                   is_const=True)
    ## phased-array-model.h (module 'antenna'): static ns3::TypeId ns3::PhasedArrayModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## phased-array-model.h (module 'antenna'): void ns3::PhasedArrayModel::SetAntennaElement(ns3::Ptr<ns3::AntennaModel> antennaElement) [member function]
    cls.add_method('SetAntennaElement',
                   'void',
                   [param('ns3::Ptr< ns3::AntennaModel >', 'antennaElement')])
    ## phased-array-model.h (module 'antenna'): void ns3::PhasedArrayModel::SetBeamformingVector(ns3::PhasedArrayModel::ComplexVector const & beamformingVector) [member function]
    cls.add_method('SetBeamformingVector',
                   'void',
                   [param('ns3::PhasedArrayModel::ComplexVector const &', 'beamformingVector')])
    ## phased-array-model.h (module 'antenna'): static double ns3::PhasedArrayModel::ComputeNorm(ns3::PhasedArrayModel::ComplexVector const & vector) [member function]
    cls.add_method('ComputeNorm',
                   'double',
                   [param('ns3::PhasedArrayModel::ComplexVector const &', 'vector')],
                   is_static=True, visibility='protected')
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register constructors for the SimpleRefCount<AttributeAccessor, ...> template instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register constructors for the SimpleRefCount<AttributeChecker, ...> template instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register constructors for the SimpleRefCount<AttributeValue, ...> template instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register constructors for the SimpleRefCount<CallbackImplBase, ...> template instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register constructors for the SimpleRefCount<Hash::Implementation, ...> template instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register constructors for the SimpleRefCount<TraceSourceAccessor, ...> template instantiation."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register pybindgen bindings for the abstract ns3::TraceSourceAccessor:
    the four pure-virtual Connect/Disconnect variants (with and without a
    context string).
    """
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3UniformPlanarArray_methods(root_module, cls):
    """Register pybindgen bindings for ns3::UniformPlanarArray, a concrete
    PhasedArrayModel: overrides of the element field-pattern, element-location,
    and element-count virtuals, plus GetTypeId.
    """
    ## uniform-planar-array.h (module 'antenna'): ns3::UniformPlanarArray::UniformPlanarArray(ns3::UniformPlanarArray const & arg0) [constructor]
    cls.add_constructor([param('ns3::UniformPlanarArray const &', 'arg0')])
    ## uniform-planar-array.h (module 'antenna'): ns3::UniformPlanarArray::UniformPlanarArray() [constructor]
    cls.add_constructor([])
    ## uniform-planar-array.h (module 'antenna'): std::pair<double, double> ns3::UniformPlanarArray::GetElementFieldPattern(ns3::Angles a) const [member function]
    cls.add_method('GetElementFieldPattern',
                   'std::pair< double, double >',
                   [param('ns3::Angles', 'a')],
                   is_const=True, is_virtual=True)
    ## uniform-planar-array.h (module 'antenna'): ns3::Vector ns3::UniformPlanarArray::GetElementLocation(uint64_t index) const [member function]
    cls.add_method('GetElementLocation',
                   'ns3::Vector',
                   [param('uint64_t', 'index')],
                   is_const=True, is_virtual=True)
    ## uniform-planar-array.h (module 'antenna'): uint64_t ns3::UniformPlanarArray::GetNumberOfElements() const [member function]
    cls.add_method('GetNumberOfElements',
                   'uint64_t',
                   [],
                   is_const=True, is_virtual=True)
    ## uniform-planar-array.h (module 'antenna'): static ns3::TypeId ns3::UniformPlanarArray::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3AntennaModel_methods(root_module, cls):
    """Register pybindgen bindings for the abstract ns3::AntennaModel base
    class: constructors, the pure-virtual GetGainDb, and GetTypeId.
    """
    ## antenna-model.h (module 'antenna'): ns3::AntennaModel::AntennaModel(ns3::AntennaModel const & arg0) [constructor]
    cls.add_constructor([param('ns3::AntennaModel const &', 'arg0')])
    ## antenna-model.h (module 'antenna'): ns3::AntennaModel::AntennaModel() [constructor]
    cls.add_constructor([])
    ## antenna-model.h (module 'antenna'): double ns3::AntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb',
                   'double',
                   [param('ns3::Angles', 'a')],
                   is_pure_virtual=True, is_virtual=True)
    ## antenna-model.h (module 'antenna'): static ns3::TypeId ns3::AntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register pybindgen bindings for the abstract ns3::AttributeAccessor:
    pure-virtual Get/Set plus the HasGetter/HasSetter capability queries.
    """
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register pybindgen bindings for the abstract ns3::AttributeChecker:
    value validation (Check), copying, value creation, and the type-name
    introspection queries used by the attribute system.
    """
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register pybindgen bindings for the abstract ns3::AttributeValue:
    Copy and the string (de)serialization pair used by the attribute system.
    """
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register pybindgen bindings for ns3::CallbackChecker (constructors only)."""
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register pybindgen bindings for ns3::CallbackImplBase: type-id and
    equality queries plus the protected static Demangle/GetCppTypeid helpers.
    """
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
    cls.add_method('GetTypeid',
                   'std::string',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle',
                   'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    # Template member instantiated for ns3::ObjectBase* only.
    ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
    cls.add_method('GetCppTypeid',
                   'std::string',
                   [],
                   is_static=True, template_parameters=['ns3::ObjectBase*'], visibility='protected')
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register pybindgen bindings for ns3::CallbackValue, the AttributeValue
    wrapper around a CallbackBase: constructors, Copy, string (de)serialization
    overrides, and Set.
    """
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3CosineAntennaModel_methods(root_module, cls):
    """Register pybindgen bindings for ns3::CosineAntennaModel, a concrete
    AntennaModel: the GetGainDb override plus beamwidth/orientation getters.
    """
    ## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel() [constructor]
    cls.add_constructor([])
    ## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel(ns3::CosineAntennaModel const & arg0) [constructor]
    cls.add_constructor([param('ns3::CosineAntennaModel const &', 'arg0')])
    ## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb',
                   'double',
                   [param('ns3::Angles', 'a')],
                   is_virtual=True)
    ## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetHorizontalBeamwidth() const [member function]
    cls.add_method('GetHorizontalBeamwidth',
                   'double',
                   [],
                   is_const=True)
    ## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetOrientation() const [member function]
    cls.add_method('GetOrientation',
                   'double',
                   [],
                   is_const=True)
    ## cosine-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::CosineAntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetVerticalBeamwidth() const [member function]
    cls.add_method('GetVerticalBeamwidth',
                   'double',
                   [],
                   is_const=True)
    return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
    """Register pybindgen bindings for ns3::EmptyAttributeAccessor, the no-op
    AttributeAccessor implementation (concrete overrides of Get/Set and the
    capability queries).
    """
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [constructor]
    cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
    """Register pybindgen bindings for ns3::EmptyAttributeChecker, the no-op
    AttributeChecker implementation (concrete overrides of the checker
    interface).
    """
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [constructor]
    cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register pybindgen bindings for ns3::EmptyAttributeValue. Its virtual
    overrides are declared private in the C++ header, hence
    visibility='private' on each method registration.
    """
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True, visibility='private')
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True, visibility='private')
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True, visibility='private')
    return
def register_Ns3IsotropicAntennaModel_methods(root_module, cls):
    """Register constructor and member-function bindings for ns3::IsotropicAntennaModel on *cls*."""
    ## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel(ns3::IsotropicAntennaModel const & arg0) [constructor]
    cls.add_constructor([param('ns3::IsotropicAntennaModel const &', 'arg0')])
    ## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel() [constructor]
    cls.add_constructor([])
    ## isotropic-antenna-model.h (module 'antenna'): double ns3::IsotropicAntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb',
                   'double',
                   [param('ns3::Angles', 'a')],
                   is_virtual=True)
    ## isotropic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::IsotropicAntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3ParabolicAntennaModel_methods(root_module, cls):
    """Register constructor and member-function bindings for ns3::ParabolicAntennaModel on *cls*."""
    ## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel() [constructor]
    cls.add_constructor([])
    ## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel(ns3::ParabolicAntennaModel const & arg0) [constructor]
    cls.add_constructor([param('ns3::ParabolicAntennaModel const &', 'arg0')])
    ## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetBeamwidth() const [member function]
    cls.add_method('GetBeamwidth',
                   'double',
                   [],
                   is_const=True)
    ## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb',
                   'double',
                   [param('ns3::Angles', 'a')],
                   is_virtual=True)
    ## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetOrientation() const [member function]
    cls.add_method('GetOrientation',
                   'double',
                   [],
                   is_const=True)
    ## parabolic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::ParabolicAntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetBeamwidth(double beamwidthDegrees) [member function]
    cls.add_method('SetBeamwidth',
                   'void',
                   [param('double', 'beamwidthDegrees')])
    ## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetOrientation(double orientationDegrees) [member function]
    cls.add_method('SetOrientation',
                   'void',
                   [param('double', 'orientationDegrees')])
    return
def register_Ns3ThreeGppAntennaModel_methods(root_module, cls):
    """Register constructor and member-function bindings for ns3::ThreeGppAntennaModel on *cls*."""
    ## three-gpp-antenna-model.h (module 'antenna'): ns3::ThreeGppAntennaModel::ThreeGppAntennaModel(ns3::ThreeGppAntennaModel const & arg0) [constructor]
    cls.add_constructor([param('ns3::ThreeGppAntennaModel const &', 'arg0')])
    ## three-gpp-antenna-model.h (module 'antenna'): ns3::ThreeGppAntennaModel::ThreeGppAntennaModel() [constructor]
    cls.add_constructor([])
    ## three-gpp-antenna-model.h (module 'antenna'): double ns3::ThreeGppAntennaModel::GetAntennaElementGain() const [member function]
    cls.add_method('GetAntennaElementGain',
                   'double',
                   [],
                   is_const=True)
    ## three-gpp-antenna-model.h (module 'antenna'): double ns3::ThreeGppAntennaModel::GetGainDb(ns3::Angles a) [member function]
    cls.add_method('GetGainDb',
                   'double',
                   [param('ns3::Angles', 'a')],
                   is_virtual=True)
    ## three-gpp-antenna-model.h (module 'antenna'): double ns3::ThreeGppAntennaModel::GetHorizontalBeamwidth() const [member function]
    cls.add_method('GetHorizontalBeamwidth',
                   'double',
                   [],
                   is_const=True)
    ## three-gpp-antenna-model.h (module 'antenna'): double ns3::ThreeGppAntennaModel::GetMaxAttenuation() const [member function]
    cls.add_method('GetMaxAttenuation',
                   'double',
                   [],
                   is_const=True)
    ## three-gpp-antenna-model.h (module 'antenna'): double ns3::ThreeGppAntennaModel::GetSlaV() const [member function]
    cls.add_method('GetSlaV',
                   'double',
                   [],
                   is_const=True)
    ## three-gpp-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::ThreeGppAntennaModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## three-gpp-antenna-model.h (module 'antenna'): double ns3::ThreeGppAntennaModel::GetVerticalBeamwidth() const [member function]
    cls.add_method('GetVerticalBeamwidth',
                   'double',
                   [],
                   is_const=True)
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register constructor bindings for ns3::TypeIdChecker on *cls*."""
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register constructor and member-function bindings for ns3::TypeIdValue on *cls*."""
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3Vector2DChecker_methods(root_module, cls):
    """Register constructor bindings for ns3::Vector2DChecker on *cls*."""
    ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [constructor]
    cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
    return
def register_Ns3Vector2DValue_methods(root_module, cls):
    """Register constructor and member-function bindings for ns3::Vector2DValue on *cls*."""
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
    cls.add_constructor([param('ns3::Vector2D const &', 'value')])
    ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [constructor]
    cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Vector2D',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Vector2D const &', 'value')])
    return
def register_Ns3Vector3DChecker_methods(root_module, cls):
    """Register constructor bindings for ns3::Vector3DChecker on *cls*."""
    ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [constructor]
    cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
    return
def register_Ns3Vector3DValue_methods(root_module, cls):
    """Register constructor and member-function bindings for ns3::Vector3DValue on *cls*."""
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
    cls.add_constructor([])
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
    cls.add_constructor([param('ns3::Vector3D const &', 'value')])
    ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [constructor]
    cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
    ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Vector3D',
                   [],
                   is_const=True)
    ## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Vector3D const &', 'value')])
    return
def register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register bindings for the zero-argument ns3::CallbackImpl returning ObjectBase* on *cls*."""
    ## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    ## callback.h (module 'core'): static std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
    cls.add_method('DoGetTypeid',
                   'std::string',
                   [],
                   is_static=True)
    ## callback.h (module 'core'): std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
    cls.add_method('GetTypeid',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): ns3::ObjectBase * ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()() [member operator]
    cls.add_method('operator()',
                   'ns3::ObjectBase *',
                   [],
                   custom_name='__call__', is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashImplementation_methods(root_module, cls):
    """Register bindings for the abstract ns3::Hash::Implementation base class on *cls*."""
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
    cls.add_constructor([])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_pure_virtual=True, is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register bindings for the ns3::Hash::Function::Fnv1a hash implementation on *cls*."""
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
    cls.add_constructor([])
    ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Register bindings for the ns3::Hash::Function::Hash32 function-pointer wrapper on *cls*."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Register bindings for the ns3::Hash::Function::Hash64 function-pointer wrapper on *cls*."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Register bindings for the ns3::Hash::Function::Murmur3 hash implementation on *cls*."""
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
    cls.add_constructor([])
    ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_functions(root_module):
    """Register free-function bindings (angle-conversion helpers from angles.h) and recurse into nested C++ namespaces."""
    module = root_module
    ## angles.h (module 'antenna'): std::vector<double, std::allocator<double> > ns3::DegreesToRadians(std::vector<double, std::allocator<double> > const & degrees) [free function]
    module.add_function('DegreesToRadians',
                        'std::vector< double >',
                        [param('std::vector< double > const &', 'degrees')])
    ## angles.h (module 'antenna'): double ns3::DegreesToRadians(double degrees) [free function]
    module.add_function('DegreesToRadians',
                        'double',
                        [param('double', 'degrees')])
    ## angles.h (module 'antenna'): std::vector<double, std::allocator<double> > ns3::RadiansToDegrees(std::vector<double, std::allocator<double> > const & radians) [free function]
    module.add_function('RadiansToDegrees',
                        'std::vector< double >',
                        [param('std::vector< double > const &', 'radians')])
    ## angles.h (module 'antenna'): double ns3::RadiansToDegrees(double radians) [free function]
    module.add_function('RadiansToDegrees',
                        'double',
                        [param('double', 'radians')])
    ## angles.h (module 'antenna'): double ns3::WrapTo180(double a) [free function]
    module.add_function('WrapTo180',
                        'double',
                        [param('double', 'a')])
    ## angles.h (module 'antenna'): double ns3::WrapTo2Pi(double a) [free function]
    module.add_function('WrapTo2Pi',
                        'double',
                        [param('double', 'a')])
    ## angles.h (module 'antenna'): double ns3::WrapTo360(double a) [free function]
    module.add_function('WrapTo360',
                        'double',
                        [param('double', 'a')])
    ## angles.h (module 'antenna'): double ns3::WrapToPi(double a) [free function]
    module.add_function('WrapToPi',
                        'double',
                        [param('double', 'a')])
    # Descend into the nested namespaces that may carry their own free functions.
    register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module)
    register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to bind in the ns3::FatalImpl namespace."""
    return
def register_functions_ns3_Hash(module, root_module):
    """Recurse into the ns3::Hash::Function namespace; no functions bound at this level."""
    register_functions_ns3_Hash_Function(module.add_cpp_namespace('Function'), root_module)
    return
def register_functions_ns3_Hash_Function(module, root_module):
    """No free functions to bind in the ns3::Hash::Function namespace."""
    return
def main():
    """Build the full module description and emit the generated binding code to stdout."""
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)
# Run the binding generator when this module is executed as a script.
if __name__ == '__main__':
    main()
| nsnam/ns-3-dev-git | src/antenna/bindings/modulegen__gcc_LP64.py | Python | gpl-2.0 | 124,960 |
# -*- coding: utf-8 -*-
# config.py
from authomatic.providers import oauth2, oauth1, openid, gaeopenid
# Authomatic provider configuration template. The '####...' values are
# placeholders: replace them with real credentials before deploying.
CONFIG = {
    'tw': {  # Your internal provider name
        # Provider class
        'class_': oauth1.Twitter,
        # Twitter is an AuthorizationProvider so we need to set several
        # other properties too:
        'consumer_key': '########################',
        'consumer_secret': '########################',
    },
    'fb': {
        'class_': oauth2.Facebook,
        # Facebook is an AuthorizationProvider too.
        'consumer_key': '########################',
        'consumer_secret': '########################',
        # But it is also an OAuth 2.0 provider and it needs scope.
        'scope': ['user_about_me', 'email', 'publish_stream'],
    },
    'gae_oi': {
        # OpenID based Google App Engine Users API works only on GAE
        # and returns only the id and email of a user.
        # Moreover, the id is not available in the development environment!
        'class_': gaeopenid.GAEOpenID,
    },
    'oi': {
        # OpenID based on python-openid library works everywhere,
        # is flexible, but requires more resources.
        'class_': openid.OpenID,
    }
}
| jasco/authomatic | examples/gae/extras/config-template.py | Python | mit | 1,223 |
# Generated by Django 3.2.4 on 2021-08-12 12:36
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: update News model options (ordering, verbose
    names) and field metadata, replacing literal labels with upper-case string
    constants (apparently i18n lookup keys, e.g. 'LABEL_TITLE')."""

    dependencies = [
        ('course', '0051_auto_20210812_1536'),
        ('news', '0003_alter_news_audience'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='news',
            options={'ordering': ['course_instance', '-pin', '-publish'], 'verbose_name': 'MODEL_NAME_NEWS', 'verbose_name_plural': 'MODEL_NAME_NEWS_PLURAL'},
        ),
        migrations.AlterField(
            model_name='news',
            name='audience',
            field=models.IntegerField(choices=[(1, 'INTERNAL_USERS'), (2, 'EXTERNAL_USERS'), (3, 'ALL_USERS')], default=3, verbose_name='LABEL_AUDIENCE'),
        ),
        migrations.AlterField(
            model_name='news',
            name='body',
            field=models.TextField(verbose_name='LABEL_BODY'),
        ),
        migrations.AlterField(
            model_name='news',
            name='course_instance',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='news', to='course.courseinstance', verbose_name='LABEL_COURSE_INSTANCE'),
        ),
        migrations.AlterField(
            model_name='news',
            name='pin',
            field=models.BooleanField(default=False, verbose_name='LABEL_PIN'),
        ),
        migrations.AlterField(
            model_name='news',
            name='publish',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='LABEL_PUBLISH'),
        ),
        migrations.AlterField(
            model_name='news',
            name='title',
            field=models.CharField(max_length=255, verbose_name='LABEL_TITLE'),
        ),
    ]
| teemulehtinen/a-plus | news/migrations/0004_auto_20210812_1536.py | Python | gpl-3.0 | 1,831 |
# coding: utf-8
"""
Constants used across the app.
"""
from __future__ import absolute_import, print_function, unicode_literals
from enum import Enum
# Some backends give more infos than others. Here is the precedence we want to
# use. First is most important one, last is the one that will always be
# considered as less trustable if two backends have similar info about a
# housing.
# Ordered most-trustworthy first; see the explanation in the comment above.
BACKENDS_BY_PRECEDENCE = [
    "foncia",
    "seloger",
    "pap",
    "leboncoin",
    "explorimmo",
    "logicimmo",
]
class TimeToModes(Enum):
    """Transportation modes used for travel-time ("time to") computations.

    The integer values are opaque mode identifiers; PUBLIC_TRANSPORT is the
    only negative one, presumably to set it apart from the physical modes —
    TODO confirm against the code that consumes these values.
    """
    PUBLIC_TRANSPORT = -1
    WALK = 1
    BIKE = 2
    CAR = 3
import os.path
import yaml
# Baseline settings merged under any user-supplied config. Values in
# 'directories' are str.format templates that may reference sibling keys
# (e.g. '{public}/mods'); they are expanded when the config is loaded.
default_configuration = {
    'solder': {
        'database': {
            'host': 'localhost',
            'user': '',
            'password': '',
            'db': '',
            'port': 3306
        },
        'directories': {
            'solder': '/path/to/solder/installation',
            'public': '{solder}/public',
            'mods': '{public}/mods',
            'resources': '{public}/resources'
        },
    }
}
class ConfigError(Exception):
    """Raised when the solder configuration file is missing or malformed.

    Derives from ``Exception`` rather than ``BaseException``: application
    errors should be catchable by ordinary ``except Exception`` handlers,
    while ``BaseException`` is reserved for process-exit signals such as
    ``KeyboardInterrupt`` and ``SystemExit`` (PEP 352).
    """
def deepupdate(original, update):
    """Recursively merge *original* into *update*, returning *update*.

    Keys absent from *update* are copied over from *original*.  Keys present
    in both are merged recursively when the value in *original* is a dict;
    otherwise the value already in *update* wins.  *update* is modified in
    place and also returned for convenience.
    """
    for key, base_value in original.items():
        if key in update:
            # Both sides have this key: recurse into nested dicts,
            # otherwise keep the overriding value from *update*.
            if isinstance(base_value, dict):
                deepupdate(base_value, update[key])
        else:
            update[key] = base_value
    return update
def load_config(fn):
    """Load, validate and normalize the solder-cli YAML configuration file.

    :param fn: path to the YAML config file (``~`` is expanded).
    :returns: the parsed config merged over ``default_configuration``, with
        the ``solder.directories`` templates expanded; the raw template for
        each directory key ``k`` is preserved under ``_k``.
    :raises ConfigError: if the file cannot be read or lacks a top-level
        ``solder`` mapping (including when the file is empty).
    """
    try:
        with open(os.path.expanduser(fn), 'r') as inf:
            # safe_load: never instantiate arbitrary Python objects from
            # YAML tags in a user-editable config file.
            config = yaml.safe_load(inf)
    except IOError:
        raise ConfigError('Config file missing')

    # An empty file parses to None; treat it the same as a missing section
    # instead of crashing on the membership test below.
    if not isinstance(config, dict) or 'solder' not in config:
        raise ConfigError('Config file missing "solder" section!')

    # Fill in any settings the user left out.
    config = deepupdate(default_configuration, config)

    # Expand the directory templates. Each value may reference sibling keys
    # (e.g. '{public}/mods'); the raw template is kept under '_'-prefixed
    # keys for later reference.
    # NOTE(review): expansion is single-pass, so a template referencing a key
    # that itself contains a placeholder is only expanded one level deep --
    # confirm this is intended.
    c = config['solder']['directories']
    nc = dict()
    for k, v in c.items():
        nc['_{}'.format(k)] = v
        nc[k] = os.path.expanduser(v.format(**c))
    config['solder']['directories'] = nc

    return config
# Manual smoke test: load the user's config and print the normalized result.
if __name__ == '__main__':
    t = load_config('~/.solder-cli.yml')
    print(t)
| DocHoncho/solder-cli | solder/config.py | Python | gpl-2.0 | 1,672 |
import pygame
from event_manager import Listener
from entity import Entity
import sprites
import models
import events
import time
class Pygame(Listener):
    """
    Pygame is a Listener class that handles all
    graphics updates with the PyGame library.

    Events received from the EventManager drive sprite creation/updates;
    rendering happens on a fixed-size "virtual" surface that is scaled to
    the real display resolution every tick.
    """
    WHITE = (255, 255, 255)
    # Flags passed to pygame.font render(): 0 = no antialiasing, 1 = antialiased.
    FONT_NO_ANTIALIASING = 0
    FONT_ANTIALIASING = 1
    # Extra flags for the virtual window Surface (none needed).
    WINDOW_SURFACE_FLAGS = 0

    def __init__(self, event_manager, viewport_size, resolution,
                 bitdepth = 32, use_fullscreen = False):
        """Register with the event manager and initialize the display.

        viewport_size is the fixed internal rendering size; resolution is the
        physical window size the viewport is scaled to.
        """
        Listener.__init__(self, event_manager)
        self.em.register(self)
        self.viewport_size = viewport_size
        self.resolution = resolution
        self.use_fullscreen = use_fullscreen
        self.bitdepth = bitdepth
        self._init_display()

    def __del__(self):
        # Shut down the pygame display if it is still up when we are collected.
        if pygame.display.get_init():
            pygame.display.quit()

    def notify(self, event):
        """Dispatch an incoming event to the matching rendering handler."""
        if isinstance(event, events.Tick):
            self._update_viewport()
        elif isinstance(event, events.PlayerUnitSpawn):
            self._show_player_unit(event.unit)
        elif isinstance(event, events.PlayerUnitHit):
            self._flash_player_unit(event.unit)
        elif isinstance(event, events.PlayerUnitMove):
            self._move_player_unit(event.unit)
        elif isinstance(event, events.PlayerUnitPrimaryFire):
            self._add_bullets(event.bullets)
        elif isinstance(event, events.EnemyUnitSpawn):
            self._add_enemy_unit(event.unit)
        elif isinstance(event, events.EnemyUnitHit):
            self._flash_enemy_unit(event.unit)
        elif isinstance(event, events.EnemyUnitPrimaryFire):
            self._add_enemy_bullets(event.bullets)
        elif isinstance(event, events.EnemyBulletFire):
            self._add_enemy_bullets(event.bullets)
        elif isinstance(event, events.BalanceBarInit):
            self._show_balance_bar(event.hud_item)

    def _init_display(self):
        """Create the display surfaces, sprite groups and debug-overlay state."""
        if not pygame.display.get_init():
            pygame.display.init()
            pygame.font.init()
        # Create "virtual" screen surface (fixed resolution)
        self.window = pygame.Surface(self.viewport_size, self.WINDOW_SURFACE_FLAGS, self.bitdepth)
        fullscreen = 0
        if self.use_fullscreen:
            fullscreen = pygame.FULLSCREEN
        self.real_window = pygame.display.set_mode(self.resolution,
                                                   fullscreen, self.bitdepth)
        # Sprite groups
        self.player_unit_sprites = pygame.sprite.Group()
        self.enemy_unit_sprites = pygame.sprite.Group()
        self.bullet_sprites = pygame.sprite.Group()
        self.hud_items_sprites = pygame.sprite.Group()
        # Sprites
        self.player_unit_sprite = sprites.PlayerUnit()
        self.balance_bar_sprite = sprites.BalanceBar()
        # Background
        self.background = pygame.surface.Surface(self.window.get_size())
        self.background.fill((23, 47, 28))
        self.osd_font = pygame.font.SysFont("Courier", 16)
        pygame.display.set_caption("SHMUP PROJECT")
        pygame.mouse.set_visible(False)
        # FPS bookkeeping for the debug overlay.
        self.old_time = time.time()
        self.start_time = time.time() # for profiling
        self.fps = 0
        self.fps_counter = 0

    def _scale_viewport(self):
        """Scale the virtual window onto the real display surface."""
        pygame.transform.scale(self.window, self.resolution, self.real_window)

    def _show_player_unit(self, unit):
        """Add the player sprite to the scene and position it at the unit."""
        self.player_unit_sprites.add(self.player_unit_sprite)
        self._move_player_unit(unit)

    def _move_player_unit(self, unit):
        """Center the player sprite on the unit's position."""
        self.player_unit_sprite.rect.center = unit.pos

    def _add_bullets(self, bullets):
        """Create a sprite for each player bullet entity."""
        for bullet in bullets:
            sprite = sprites.Bullet(entity = bullet)
            self.bullet_sprites.add(sprite)

    def _add_enemy_bullets(self, bullets):
        """Create a sprite for each enemy bullet entity (no-op if empty/None)."""
        if bullets:
            for bullet in bullets:
                sprite = sprites.EnemyBullet(entity = bullet)
                self.bullet_sprites.add(sprite)

    def _add_enemy_unit(self, unit):
        """Create a sprite for a newly spawned enemy unit."""
        sprite = sprites.EnemyUnit(entity = unit)
        self.enemy_unit_sprites.add(sprite)

    def _flash_enemy_unit(self, unit):
        """Switch the sprite(s) bound to this enemy entity to the FLASHING state."""
        for sprite in (s for s in self.enemy_unit_sprites if s.entity == unit):
            sprite.state = sprites.EnemyUnit.FLASHING

    def _flash_player_unit(self, unit):
        """Switch the player sprite to its FLASHING (hit) state."""
        self.player_unit_sprite.state = sprites.PlayerUnit.FLASHING

    def _show_balance_bar(self, hud_item):
        """Add the balance-bar HUD sprite and center it on the HUD item."""
        self.hud_items_sprites.add(self.balance_bar_sprite)
        self.balance_bar_sprite.rect.center = hud_item.pos

    def _show_debug(self):
        """Draw the FPS/entity-count overlay directly on the real window."""
        self.fps_counter += 1
        if self.fps > 0:
            text_fps = self.osd_font.render("fps: %d (%.3f ms)" % (self.fps, 1000.0 / self.fps), self.FONT_ANTIALIASING, self.WHITE)
            self.real_window.blit(text_fps, (5, 5))
        text_enemies = self.osd_font.render("enemies: %d" % len(self.enemy_unit_sprites), self.FONT_ANTIALIASING, self.WHITE)
        text_bullets = self.osd_font.render("bullets: %d" % len(self.bullet_sprites), self.FONT_ANTIALIASING, self.WHITE)
        total_entities = len(self.enemy_unit_sprites) + len(self.bullet_sprites)
        text_total_entities = self.osd_font.render("total entities: %d" % total_entities, self.FONT_ANTIALIASING, self.WHITE)
        self.real_window.blit(text_enemies, (5, 20))
        self.real_window.blit(text_bullets, (5, 35))
        self.real_window.blit(text_total_entities, (5, 50))
        # Recompute the FPS figure once per wall-clock second.
        if time.time() - self.old_time >= 1:
            self.fps = self.fps_counter
            self.fps_counter = 0
            self.old_time = time.time()
            #print "FPS:", self.fps

    def _update_viewport(self):
        """Per-tick render pass: prune deleted entities, update sprites, draw and flip."""
        self.player_unit_sprites.update()
        deleted_enemies = [e for e in self.enemy_unit_sprites if e.entity.state == Entity.DELETED]
        self.enemy_unit_sprites.remove(deleted_enemies)
        self.enemy_unit_sprites.update()
        deleted_bullets = [e for e in self.bullet_sprites if e.entity.state == Entity.DELETED]
        self.bullet_sprites.remove(deleted_bullets)
        self.bullet_sprites.update()
        self.window.blit(self.background, self.window.get_rect())
        self.player_unit_sprites.draw(self.window)
        self.enemy_unit_sprites.draw(self.window)
        self.bullet_sprites.draw(self.window)
        self.hud_items_sprites.draw(self.window)
        self._scale_viewport()
        self._show_debug() # draws to real_window
        pygame.display.update()
        # for profiling
        #if time.time() - self.start_time > 100:
            #self.em.post(events.Quit())
| viswimmer1/PythonGenerator | data/python_files/29417298/views.py | Python | gpl-2.0 | 6,599 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import calendar
import eventlet
import os
import time
from oslo.config import cfg
from glance.common import crypt
from glance.common import exception
from glance.common import utils
from glance import context
from glance.openstack.common import lockutils
import glance.openstack.common.log as logging
import glance.openstack.common.uuidutils as uuidutils
import glance.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
# Configuration options controlling scrubber behaviour; registered on the
# global CONF object below so both glance-api and glance-scrubber see them.
scrubber_opts = [
    cfg.StrOpt('scrubber_datadir',
               default='/var/lib/glance/scrubber',
               help=_('Directory that the scrubber will use to track '
                      'information about what to delete. '
                      'Make sure this is set in glance-api.conf and '
                      'glance-scrubber.conf')),
    cfg.IntOpt('scrub_time', default=0,
               help=_('The amount of time in seconds to delay before '
                      'performing a delete.')),
    cfg.BoolOpt('cleanup_scrubber', default=False,
                help=_('A boolean that determines if the scrubber should '
                       'clean up the files it uses for taking data. Only '
                       'one server in your deployment should be designated '
                       'the cleanup host.')),
    cfg.IntOpt('cleanup_scrubber_time', default=86400,
               help=_('Items must have a modified time that is older than '
                      'this value in order to be candidates for cleanup.'))
]

CONF = cfg.CONF
CONF.register_opts(scrubber_opts)
CONF.import_opt('metadata_encryption_key', 'glance.common.config')
class ScrubQueue(object):
    """Image scrub queue base class.

    The queue contains image locations which need to be deleted from the
    backend store.

    NOTE(review): the @abc.abstractmethod decorators below are not actually
    enforced, because this class uses the plain ``object``/``type`` metaclass
    rather than ``abc.ABCMeta`` -- subclasses that omit an override will only
    fail when the method is called, not at instantiation.
    """
    def __init__(self):
        # Set up an admin-authenticated registry client shared by subclasses.
        registry.configure_registry_client()
        registry.configure_registry_admin_creds()
        self.registry = registry.get_registry_client(context.RequestContext())

    @abc.abstractmethod
    def add_location(self, image_id, uri, user_context=None):
        """Adding image location to scrub queue.

        :param image_id: The opaque image identifier
        :param uri: The opaque image location uri
        :param user_context: The user's request context
        """
        pass

    @abc.abstractmethod
    def get_all_locations(self):
        """Returns a list of image id and location tuple from scrub queue.

        :retval a list of image id and location tuple from scrub queue
        """
        pass

    @abc.abstractmethod
    def pop_all_locations(self):
        """Pop out a list of image id and location tuple from scrub queue.

        :retval a list of image id and location tuple from scrub queue
        """
        pass

    @abc.abstractmethod
    def has_image(self, image_id):
        """Returns whether the queue contains an image or not.

        :param image_id: The opaque image identifier
        :retval a boolean value to inform including or not
        """
        pass
class ScrubFileQueue(ScrubQueue):
    """File-based image scrub queue class.

    Each queued image gets one file named after its image id under
    CONF.scrubber_datadir.  A queue file is a sequence of two-line
    records: the (optionally encrypted) location uri followed by the
    scheduled delete timestamp.
    """
    def __init__(self):
        super(ScrubFileQueue, self).__init__()
        self.scrubber_datadir = CONF.scrubber_datadir
        utils.safe_mkdirs(self.scrubber_datadir)
        self.scrub_time = CONF.scrub_time
        self.metadata_encryption_key = CONF.metadata_encryption_key

    def _read_queue_file(self, file_path):
        """Reading queue file to loading deleted location and timestamp out.

        :param file_path: Queue file full path
        :retval a list of image location timestamp tuple from queue file
        """
        uris = []
        delete_times = []
        try:
            with open(file_path, 'r') as f:
                # Records are (uri line, timestamp line) pairs; an empty
                # uri line marks end of file.
                while True:
                    uri = f.readline().strip()
                    if uri:
                        uris.append(uri)
                        delete_times.append(int(f.readline().strip()))
                    else:
                        break
        except Exception:
            LOG.error(_("%s file can not be read.") % file_path)
        return uris, delete_times

    def _update_queue_file(self, file_path, remove_record_idxs):
        """Updating queue file to remove such queue records.

        :param file_path: Queue file full path
        :param remove_record_idxs: A list of record index those want to remove
        """
        try:
            with open(file_path, 'r') as f:
                lines = f.readlines()
            # NOTE(zhiyan) we need bottom up removing to
            # keep record index be valid.
            remove_record_idxs.sort(reverse=True)
            for record_idx in remove_record_idxs:
                # Each record has two lines (uri, timestamp), so record N
                # starts at line 2 * N.  The previous computation,
                # (record_idx + 1) * 2 - 1, pointed at the timestamp line
                # and therefore removed the timestamp of record N together
                # with the uri of record N + 1.
                line_no = record_idx * 2
                del lines[line_no:line_no + 2]
            with open(file_path, 'w') as f:
                f.write(''.join(lines))
            os.chmod(file_path, 0o600)
        except Exception:
            LOG.error(_("%s file can not be wrote.") % file_path)

    def add_location(self, image_id, uri, user_context=None):
        """Adding image location to scrub queue.

        :param image_id: The opaque image identifier
        :param uri: The opaque image location uri
        :param user_context: The user's request context
        """
        if user_context is not None:
            registry_client = registry.get_registry_client(user_context)
        else:
            registry_client = self.registry
        with lockutils.lock("scrubber-%s" % image_id,
                            lock_file_prefix='glance-', external=True):
            # NOTE(zhiyan): make sure scrubber does not cleanup
            # 'pending_delete' images concurrently before the code
            # get lock and reach here.
            try:
                image = registry_client.get_image(image_id)
                if image['status'] == 'deleted':
                    return
            except exception.NotFound as e:
                LOG.error(_("Failed to find image to delete: "
                            "%(e)s") % locals())
                return
            delete_time = time.time() + self.scrub_time
            file_path = os.path.join(self.scrubber_datadir, str(image_id))
            if self.metadata_encryption_key is not None:
                uri = crypt.urlsafe_encrypt(self.metadata_encryption_key,
                                            uri, 64)
            if os.path.exists(file_path):
                # Append the uri of location to the queue file
                with open(file_path, 'a') as f:
                    f.write('\n')
                    f.write('\n'.join([uri, str(int(delete_time))]))
            else:
                # NOTE(zhiyan): Protect the file before we write any data.
                open(file_path, 'w').close()
                os.chmod(file_path, 0o600)
                with open(file_path, 'w') as f:
                    f.write('\n'.join([uri, str(int(delete_time))]))
            # The file mtime doubles as the earliest scheduled delete time.
            os.utime(file_path, (delete_time, delete_time))

    def _walk_all_locations(self, remove=False):
        """Returns a list of image id and location tuple from scrub queue.

        :param remove: Whether remove location from queue or not after walk
        :retval a list of image image_id and location tuple from scrub queue
        """
        if not os.path.exists(self.scrubber_datadir):
            LOG.info(_("%s directory does not exist.") % self.scrubber_datadir)
            return []
        ret = []
        for root, dirs, files in os.walk(self.scrubber_datadir):
            for image_id in files:
                # Skip files (e.g. '.cleanup') that are not image queues.
                if not uuidutils.is_uuid_like(image_id):
                    continue
                with lockutils.lock("scrubber-%s" % image_id,
                                    lock_file_prefix='glance-', external=True):
                    file_path = os.path.join(self.scrubber_datadir, image_id)
                    uris, delete_times = self._read_queue_file(file_path)
                    remove_record_idxs = []
                    skipped = False
                    for (record_idx, delete_time) in enumerate(delete_times):
                        if delete_time > time.time():
                            # Not yet due; keep the record in the file.
                            skipped = True
                            continue
                        else:
                            ret.append((image_id, uris[record_idx]))
                            remove_record_idxs.append(record_idx)
                    if remove:
                        if skipped:
                            # NOTE(zhiyan): remove location records from
                            # the queue file.
                            self._update_queue_file(file_path,
                                                    remove_record_idxs)
                        else:
                            # Every record is due: drop the whole file.
                            utils.safe_remove(file_path)
        return ret

    def get_all_locations(self):
        """Returns a list of image id and location tuple from scrub queue.

        :retval a list of image id and location tuple from scrub queue
        """
        return self._walk_all_locations()

    def pop_all_locations(self):
        """Pop out a list of image id and location tuple from scrub queue.

        :retval a list of image id and location tuple from scrub queue
        """
        return self._walk_all_locations(remove=True)

    def has_image(self, image_id):
        """Returns whether the queue contains an image or not.

        :param image_id: The opaque image identifier
        :retval a boolean value to inform including or not
        """
        return os.path.exists(os.path.join(self.scrubber_datadir,
                                           str(image_id)))
class ScrubDBQueue(ScrubQueue):
    """Database-based image scrub queue class."""
    def __init__(self):
        super(ScrubDBQueue, self).__init__()
        # Minimum age (seconds since deletion) before an image is cleaned up.
        self.cleanup_scrubber_time = CONF.cleanup_scrubber_time
    def add_location(self, image_id, uri, user_context=None):
        """Adding image location to scrub queue.
        :param image_id: The opaque image identifier
        :param uri: The opaque image location uri
        :param user_context: The user's request context
        """
        # The DB queue is read-only: entries come from the registry's
        # 'pending_delete' state, never from explicit enqueueing.
        raise NotImplementedError
    def _walk_all_locations(self, remove=False):
        """Returns a list of image id and location tuple from scrub queue.
        :param remove: Whether remove location from queue or not after walk
        :retval a list of image id and location tuple from scrub queue
        """
        filters = {'deleted': True,
                   'is_public': 'none',
                   'status': 'pending_delete'}
        ret = []
        for image in self.registry.get_images_detailed(filters=filters):
            deleted_at = image.get('deleted_at')
            if not deleted_at:
                continue
            # NOTE: Strip off microseconds which may occur after the last '.,'
            # Example: 2012-07-07T19:14:34.974216
            date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0]
            delete_time = calendar.timegm(time.strptime(date_str,
                                                        "%Y-%m-%dT%H:%M:%S"))
            # Skip images deleted more recently than cleanup_scrubber_time.
            if delete_time + self.cleanup_scrubber_time > time.time():
                continue
            ret.extend([(image['id'], location['uri'])
                        for location in image['location_data']])
            if remove:
                # "Popping" from the DB queue means finalizing the delete.
                self.registry.update_image(image['id'], {'status': 'deleted'})
        return ret
    def get_all_locations(self):
        """Returns a list of image id and location tuple from scrub queue.
        :retval a list of image id and location tuple from scrub queue
        """
        return self._walk_all_locations()
    def pop_all_locations(self):
        """Pop out a list of image id and location tuple from scrub queue.
        :retval a list of image id and location tuple from scrub queue
        """
        return self._walk_all_locations(remove=True)
    def has_image(self, image_id):
        """Returns whether the queue contains an image or not.
        :param image_id: The opaque image identifier
        :retval a boolean value to inform including or not
        """
        try:
            image = self.registry.get_image(image_id)
            return image['status'] == 'pending_delete'
        except exception.NotFound as e:
            # Unknown image is simply "not queued".  (NOTE(review): the
            # bound name 'e' is unused.)
            return False
# Lazily-created module-level singletons; both queue flavours share them.
_file_queue = None
_db_queue = None


def get_scrub_queues():
    """Return the singleton (file queue, db queue) pair, creating on demand."""
    global _file_queue, _db_queue
    if _file_queue is None:
        _file_queue = ScrubFileQueue()
    if _db_queue is None:
        _db_queue = ScrubDBQueue()
    return (_file_queue, _db_queue)
class Daemon(object):
    # Periodic driver: re-schedules the scrubber application every
    # `wakeup_time` seconds inside an eventlet green pool.
    def __init__(self, wakeup_time=300, threads=1000):
        """:param wakeup_time: seconds between scrub passes
        :param threads: size of the eventlet GreenPool used by the passes
        """
        LOG.info(_("Starting Daemon: wakeup_time=%(wakeup_time)s "
                   "threads=%(threads)s") % locals())
        self.wakeup_time = wakeup_time
        # Event that `wait()` blocks on; never triggered here, so wait()
        # effectively blocks until KeyboardInterrupt.
        self.event = eventlet.event.Event()
        self.pool = eventlet.greenpool.GreenPool(threads)
    def start(self, application):
        # Kick off the first pass; subsequent passes self-schedule in _run.
        self._run(application)
    def wait(self):
        try:
            self.event.wait()
        except KeyboardInterrupt:
            msg = _("Daemon Shutdown on KeyboardInterrupt")
            LOG.info(msg)
    def _run(self, application):
        LOG.debug(_("Running application"))
        self.pool.spawn_n(application.run, self.pool, self.event)
        # Re-arm the timer for the next pass before this one completes.
        eventlet.spawn_after(self.wakeup_time, self._run, application)
        LOG.debug(_("Next run scheduled in %s seconds") % self.wakeup_time)
class Scrubber(object):
    """Worker that deletes queued image locations from backend storage.

    Jobs normally come from the file queue (written by glance-api on
    delayed delete); the optional cleanup pass re-checks the registry
    database for 'pending_delete' images that were missed.
    """
    def __init__(self, store_api):
        LOG.info(_("Initializing scrubber with configuration: %s") %
                 unicode({'scrubber_datadir': CONF.scrubber_datadir,
                          'cleanup': CONF.cleanup_scrubber,
                          'cleanup_time': CONF.cleanup_scrubber_time,
                          'registry_host': CONF.registry_host,
                          'registry_port': CONF.registry_port}))
        utils.safe_mkdirs(CONF.scrubber_datadir)
        self.store_api = store_api
        registry.configure_registry_client()
        registry.configure_registry_admin_creds()
        self.registry = registry.get_registry_client(context.RequestContext())
        (self.file_queue, self.db_queue) = get_scrub_queues()

    def _get_delete_jobs(self, queue, pop):
        """Collect delete jobs from *queue*, grouped by image id.

        :param queue: a ScrubQueue implementation
        :param pop: whether to remove the jobs from the queue while reading
        :retval dict mapping image_id to a list of (image_id, uri) tuples,
                or None when the queue could not be read
        """
        try:
            if pop:
                image_id_uri_list = queue.pop_all_locations()
            else:
                image_id_uri_list = queue.get_all_locations()
        except Exception:
            # NOTE: was a bare 'except:', which also swallows SystemExit
            # and KeyboardInterrupt.  The conditional below must be
            # parenthesized: '%' binds tighter than 'if/else', so the
            # unparenthesized original logged just the string 'get'
            # (without the message) whenever pop was False.
            LOG.error(_("Can not %s scrub jobs from queue.") %
                      ('pop' if pop else 'get'))
            return None
        delete_jobs = {}
        for image_id, image_uri in image_id_uri_list:
            delete_jobs.setdefault(image_id, []).append((image_id, image_uri))
        return delete_jobs

    def run(self, pool, event=None):
        """Single scrub pass: process file-queue jobs, then optional cleanup."""
        delete_jobs = self._get_delete_jobs(self.file_queue, True)
        if delete_jobs:
            for image_id, jobs in delete_jobs.iteritems():
                self._scrub_image(pool, image_id, jobs)
        if CONF.cleanup_scrubber:
            self._cleanup(pool)

    def _scrub_image(self, pool, image_id, delete_jobs):
        """Delete every queued location of one image, then finalize it."""
        if len(delete_jobs) == 0:
            return
        LOG.info(_("Scrubbing image %(id)s from %(count)d locations.") %
                 {'id': image_id, 'count': len(delete_jobs)})
        # NOTE(bourke): The starmap must be iterated to do work
        list(pool.starmap(self._delete_image_from_backend, delete_jobs))
        image = self.registry.get_image(image_id)
        # Only mark 'deleted' once no further locations remain queued.
        if (image['status'] == 'pending_delete' and
                not self.file_queue.has_image(image_id)):
            self.registry.update_image(image_id, {'status': 'deleted'})

    def _delete_image_from_backend(self, image_id, uri):
        """Delete a single (possibly encrypted) location uri from the store."""
        if CONF.metadata_encryption_key is not None:
            uri = crypt.urlsafe_decrypt(CONF.metadata_encryption_key, uri)
        try:
            LOG.debug(_("Deleting URI from image %(image_id)s.") %
                      {'image_id': image_id})
            # Here we create a request context with credentials to support
            # delayed delete when using multi-tenant backend storage
            admin_tenant = CONF.admin_tenant_name
            auth_token = self.registry.auth_tok
            admin_context = context.RequestContext(user=CONF.admin_user,
                                                   tenant=admin_tenant,
                                                   auth_tok=auth_token)
            self.store_api.delete_from_backend(admin_context, uri)
        except Exception:
            # Best-effort: a failed location delete is logged, not fatal.
            msg = _("Failed to delete URI from image %(image_id)s")
            LOG.error(msg % {'image_id': image_id})

    def _read_cleanup_file(self, file_path):
        """Reading cleanup to get latest cleanup timestamp.

        :param file_path: Cleanup status file full path
        :retval latest cleanup timestamp
        """
        try:
            if not os.path.exists(file_path):
                msg = _("%s file is not exists.") % unicode(file_path)
                raise Exception(msg)
            atime = int(os.path.getatime(file_path))
            mtime = int(os.path.getmtime(file_path))
            # atime and mtime are both set to the cleanup timestamp by
            # _update_cleanup_file; disagreement means the file was touched.
            if atime != mtime:
                msg = _("%s file contains conflicting cleanup "
                        "timestamp.") % unicode(file_path)
                raise Exception(msg)
            return atime
        except Exception as e:
            LOG.error(e)
            return None

    def _update_cleanup_file(self, file_path, cleanup_time):
        """Update latest cleanup timestamp to cleanup file.

        :param file_path: Cleanup status file full path
        :param cleanup_time: The Latest cleanup timestamp
        """
        try:
            open(file_path, 'w').close()
            os.chmod(file_path, 0o600)
            os.utime(file_path, (cleanup_time, cleanup_time))
        except Exception:
            LOG.error(_("%s file can not be created.") % unicode(file_path))

    def _cleanup(self, pool):
        """Scrub stale 'pending_delete' images found via the registry DB."""
        now = time.time()
        cleanup_file = os.path.join(CONF.scrubber_datadir, ".cleanup")
        if not os.path.exists(cleanup_file):
            self._update_cleanup_file(cleanup_file, now)
            return
        last_cleanup_time = self._read_cleanup_file(cleanup_file)
        cleanup_time = last_cleanup_time + CONF.cleanup_scrubber_time
        if cleanup_time > now:
            return
        LOG.info(_("Getting images deleted before "
                   "%s") % CONF.cleanup_scrubber_time)
        self._update_cleanup_file(cleanup_file, now)
        delete_jobs = self._get_delete_jobs(self.db_queue, False)
        if not delete_jobs:
            return
        for image_id, jobs in delete_jobs.iteritems():
            with lockutils.lock("scrubber-%s" % image_id,
                                lock_file_prefix='glance-', external=True):
                if not self.file_queue.has_image(image_id):
                    # NOTE(zhiyan): scrubber should not cleanup this image
                    # since a queue file be created for this 'pending_delete'
                    # image concurrently before the code get lock and
                    # reach here. The checking only be worth if glance-api and
                    # glance-scrubber service be deployed on a same host.
                    self._scrub_image(pool, image_id, jobs)
| SUSE-Cloud/glance | glance/store/scrubber.py | Python | apache-2.0 | 20,254 |
class GCodeLayerMixer(object):
    """Iterator that rotates ("mixes up") the commands within each G-code
    layer while leaving the layer-separating Z moves in place.

    Each call to a layer boundary rotates the layer's commands one step
    further than the previous layer (see ``_last_mix_up_index``).
    """

    def __init__(self, source):
        """:param source: iterable of G-code lines (trailing whitespace
        is stripped as lines are consumed)."""
        self.lines = self._generate(source)
        self.buffer = []

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 protocol; delegate to the Python 2 style `next`.
        return self.next()

    def next(self):
        if len(self.buffer) == 0:
            self._populate_buffer()
        head, tail = self.buffer[0], self.buffer[1:]
        self.buffer = tail
        return head

    def _generate(self, source):
        """Wrap the source in a generator so we can pull lines lazily."""
        for line in source:
            yield line

    def _populate_buffer(self):
        """Read one layer from the source, mix it, and store it in buffer.

        Raises StopIteration (ending iteration) when the source is empty.
        """
        layer_data = []
        complete = False
        # NOTE: the original called `self.lines.next()`, which only exists
        # on Python 2 generators; the builtin next() works on both 2 and 3.
        current = next(self.lines).rstrip()
        while not complete and not self._is_z_movement(current):
            layer_data.append(current)
            try:
                current = next(self.lines).rstrip()
            except StopIteration:
                complete = True
        mixed_layer = self._mixup(layer_data)
        if not complete:
            # Keep the layer-ending Z move at the end, unmixed.
            mixed_layer.append(current)
        self.buffer = mixed_layer

    # Rotation offset for the next layer; read from the class, then
    # shadowed per-instance on first increment.
    _last_mix_up_index = 1

    def _mixup(self, items):
        """Rotate *items* left by the current mix-up index (then bump it)."""
        if len(items) < 1:
            return items
        if len(items) < self._last_mix_up_index:
            self._last_mix_up_index = 0
        head = items[:self._last_mix_up_index]
        tail = items[self._last_mix_up_index:]
        tail.extend(head)
        self._last_mix_up_index += 1
        return tail

    def _is_z_movement(self, gcodeline):
        """True for G commands containing a Z coordinate (layer change)."""
        return gcodeline.startswith('G') and 'Z' in gcodeline
| PeachyPrinter/peachytoolchain | src/util/gcode_layer_mixer.py | Python | gpl-3.0 | 1,641 |
# -*- coding: utf-8 -*-
from rest_framework_extensions.test import APITestCase
from rest_framework_extensions.settings import extensions_api_settings
from rest_framework_extensions import utils
from .urls import urlpatterns
from .models import CommentForListDestroyModelMixin as Comment
from tests_app.testutils import override_extensions_api_settings
class ListDestroyModelMixinTest(APITestCase):
    # Functional tests for the list-destroy mixin: listing, filtering,
    # single destroy, and header-protected bulk destroy.
    urls = urlpatterns
    def setUp(self):
        # Two fixture comments with fixed ids so responses are predictable.
        self.comments = [
            Comment.objects.create(
                id=1,
                email='[email protected]'
            ),
            Comment.objects.create(
                id=2,
                email='[email protected]'
            )
        ]
        # Header that authorizes bulk (whole-queryset) operations.
        self.protection_headers = {
            utils.prepare_header_name(extensions_api_settings.DEFAULT_BULK_OPERATION_HEADER_NAME): 'true'
        }
    def test_simple_response(self):
        resp = self.client.get('/comments/')
        expected = [
            {
                'id': 1,
                'email': '[email protected]'
            },
            {
                'id': 2,
                'email': '[email protected]'
            }
        ]
        self.assertEqual(resp.data, expected)
    def test_filter_works(self):
        # Query-string filtering should narrow the list response.
        resp = self.client.get('/comments/?id=1')
        expected = [
            {
                'id': 1,
                'email': '[email protected]'
            }
        ]
        self.assertEqual(resp.data, expected)
    def test_destroy_instance(self):
        # Detail-route DELETE removes exactly that instance.
        resp = self.client.delete('/comments/1/')
        self.assertEqual(resp.status_code, 204)
        self.assertFalse(1 in Comment.objects.values_list('pk', flat=True))
    def test_bulk_destroy__without_protection_header(self):
        # Bulk DELETE without the protection header must be rejected.
        resp = self.client.delete('/comments/')
        self.assertEqual(resp.status_code, 400)
        expected_message = {
            'detail': 'Header \'{0}\' should be provided for bulk operation.'.format(
                extensions_api_settings.DEFAULT_BULK_OPERATION_HEADER_NAME
            )
        }
        self.assertEqual(resp.data, expected_message)
    def test_bulk_destroy__with_protection_header(self):
        resp = self.client.delete('/comments/', **self.protection_headers)
        self.assertEqual(resp.status_code, 204)
        self.assertEqual(Comment.objects.count(), 0)
    @override_extensions_api_settings(DEFAULT_BULK_OPERATION_HEADER_NAME=None)
    def test_bulk_destroy__without_protection_header__and_with_turned_off_protection_header(self):
        # With the protection disabled in settings, no header is required.
        resp = self.client.delete('/comments/')
        self.assertEqual(resp.status_code, 204)
        self.assertEqual(Comment.objects.count(), 0)
    def test_bulk_destroy__should_destroy_filtered_queryset(self):
        # Bulk destroy honours query-string filters: only id=1 is removed.
        resp = self.client.delete('/comments/?id=1', **self.protection_headers)
        self.assertEqual(resp.status_code, 204)
        self.assertEqual(Comment.objects.count(), 1)
        self.assertEqual(Comment.objects.all()[0], self.comments[1])
def test_bulk_destroy__should_not_destroy_if_client_has_no_permissions(self):
resp = self.client.delete('/comments-with-permission/', **self.protection_headers)
self.assertEqual(resp.status_code, 404)
self.assertEqual(Comment.objects.count(), 2) | ticosax/drf-extensions | tests_app/tests/functional/mixins/list_destroy_model_mixin/tests.py | Python | mit | 3,257 |
import os
from abc import ABCMeta
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import tensorlight as light
import base
class MNISTBaseDataset(base.AbstractDataset):
    """MNIST base dataset wrapping the functions provided by tensorflow."""
    __metaclass__ = ABCMeta
    # load the dataset file lazily and just once
    _mnist = None
    """MNIST base dataset wrapping the functions provided by tensorflow."""
    def __init__(self, data_dir, dataset, as_binary):
        """Creates a dataset instance.
        Parameters
        ----------
        data_dir: str
            The path where the data will be stored.
        dataset: Dataset
            The TensorFlow MNIST dataset to use.
        as_binary: Boolean
            Whether the data should be returned as float (default) in range
            [0.0, 1.0], or as pseudo-binary with values {0.0, 1.0}, were the
            original data.
        """
        # Flat 784-vector images reshaped to (N, 28, 28, 1) NHWC.
        data = dataset.images.reshape((-1, 28, 28, 1))
        if as_binary:
            self._data = light.utils.data.as_binary(data)
        else:
            self._data = data
        self._targets = dataset.labels
        dataset_size = dataset.num_examples
        # Index permutation reshuffled on every reset() for random batches.
        self._indices = np.arange(dataset_size)
        # Cursor into _indices marking the next batch start.
        self._row = 0
        super(MNISTBaseDataset, self).__init__(data_dir, dataset_size, [28,28,1], [10])
    @light.utils.attr.override
    def get_batch(self, batch_size):
        # NOTE(review): '>=' reshuffles even when the remaining rows would
        # exactly fill this batch, so the epoch's tail batch may be drawn
        # from a fresh permutation — confirm this is intended.
        if self._row + batch_size >= self.size:
            self.reset()
        start = self._row
        end = start + batch_size
        ind_range = self._indices[start:end]
        self._row += batch_size
        images = self._data[ind_range]
        labels = self._targets[ind_range]
        return images, labels
    @light.utils.attr.override
    def reset(self):
        # Rewind the cursor and reshuffle the sampling order.
        self._row = 0
        np.random.shuffle(self._indices)
    @staticmethod
    def mnist(data_dir):
        # Lazily download/parse MNIST once and cache it on the class.
        if MNISTBaseDataset._mnist is None:
            directory = os.path.join(data_dir, 'MNIST_data')
            MNISTBaseDataset._mnist = input_data.read_data_sets(directory, one_hot=True)
        return MNISTBaseDataset._mnist
class MNISTTrainDataset(MNISTBaseDataset):
    """The MNIST *training* split.

    Parameters
    ----------
    data_dir: str
        Directory where the MNIST files are downloaded/cached.
    as_binary: Boolean
        If True, return pseudo-binary pixels {0.0, 1.0} instead of
        floats in [0.0, 1.0].
    """
    def __init__(self, data_dir, as_binary=False):
        split = MNISTBaseDataset.mnist(data_dir).train
        super(MNISTTrainDataset, self).__init__(data_dir, split, as_binary)
class MNISTValidDataset(MNISTBaseDataset):
    """The MNIST *validation* split.

    Parameters
    ----------
    data_dir: str
        Directory where the MNIST files are downloaded/cached.
    as_binary: Boolean
        If True, return pseudo-binary pixels {0.0, 1.0} instead of
        floats in [0.0, 1.0].
    """
    def __init__(self, data_dir, as_binary=False):
        split = MNISTBaseDataset.mnist(data_dir).validation
        super(MNISTValidDataset, self).__init__(data_dir, split, as_binary)
class MNISTTestDataset(MNISTBaseDataset):
    """The MNIST *test* split.

    Parameters
    ----------
    data_dir: str
        Directory where the MNIST files are downloaded/cached.
    as_binary: Boolean
        If True, return pseudo-binary pixels {0.0, 1.0} instead of
        floats in [0.0, 1.0].
    """
    def __init__(self, data_dir, as_binary=False):
        split = MNISTBaseDataset.mnist(data_dir).test
        super(MNISTTestDataset, self).__init__(data_dir, split, as_binary)
| bsautermeister/tensorlight | tensorlight/datasets/mnist.py | Python | mit | 4,084 |
"""
Django settings for avalon project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# from the environment before any production deployment.
SECRET_KEY = 's!&2oe*ppw36!n05#33089hb6-d%+(q1fianz*ejvsl4*kzyox'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'avalon_game.apps.AvalonGameConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# NOTE(review): MIDDLEWARE_CLASSES is the pre-1.10 setting name — confirm the
# deployed Django version before upgrading.
MIDDLEWARE_CLASSES = [
    # Does not exist in Django before 1.8.
    #'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'avalon.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'avalon.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        # Wrap each HTTP request in a transaction.
        'ATOMIC_REQUESTS': True,
        'OPTIONS': {
            'timeout': 60,  # longer database timeout
        }
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| dperelman/avalon-django | avalon/avalon/settings.py | Python | agpl-3.0 | 3,366 |
class Movement:
    """Ordered collection of motions that together form one movement."""

    def __init__(self):
        # Motions are kept in the order they were appended.
        self.motions = list()

    def append_motion(self, motion=None):
        """Append a single motion to this movement.

        :param motion: object exposing ``duration.duration`` (milliseconds)
        """
        self.motions.append(motion)

    def get_maximum_duration(self) -> int:
        """Return the longest duration (in ms) among this movement's motions.

        Example: for motions lasting 500, 2000 and 100 ms this returns 2000.

        :raises ValueError: when no motions have been appended
        :return: int max duration of motion of movement
        """
        # NOTE: the previous doctest here imported Watchdog and read
        # 'test.json' from disk, so it could never run in isolation; it is
        # replaced by the plain example above.
        return max(motion.duration.duration for motion in self.motions)
| idobrusin/RocLang | RocDemo/roc_node/scripts/models/Movement.py | Python | mit | 742 |
import unittest
# http://blog.csdn.net/pointbreak1/article/details/48780345
def groupStrings(strings):
    """Group strings that are alphabet shifts of one another.

    Two strings belong to the same group when one can be obtained from the
    other by shifting every letter by the same amount (mod 26), e.g.
    'abc' -> 'bcd' -> 'xyz'.

    :param strings: iterable of lowercase strings
    :return: list of groups, each group sorted alphabetically
    """
    groups = {}
    for s in strings:
        # setdefault avoids the explicit membership test; the leftover
        # debug print of each group has been removed.
        groups.setdefault(hashStr(s), []).append(s)
    return [sorted(group) for group in groups.values()]


def hashStr(s):
    """Return a shift-invariant signature for *s*.

    Each character is replaced by its offset from the first character
    (mod 26), so all shifts of a string share the same tuple.  The empty
    string maps to the empty tuple (previously an IndexError).
    """
    if not s:
        return ()
    return tuple((ord(c) - ord(s[0]) + 26) % 26 for c in s)
class Test(unittest.TestCase):
    """Unit tests for the shifted-string grouping helpers."""

    def test_hashStr(self):
        # Strings that are shifts of each other share one signature.
        self.assertEqual(hashStr('abc'), hashStr('yza'))
        self.assertEqual(hashStr('az'), hashStr('ba'))
        self.assertEqual(hashStr('eqdf'), hashStr('qcpr'))

    def test_groupStrings(self):
        # NOTE: this test previously only printed the result; it now
        # asserts the expected grouping (sorted for determinism).
        strings = ['abc', 'bcd', 'acef', 'xyz', 'az', 'ba', 'a', 'z',
                   'eqdf', 'qcpr']
        result = sorted(groupStrings(strings))
        self.assertEqual(result, [['a', 'z'], ['abc', 'bcd', 'xyz'],
                                  ['acef'], ['az', 'ba'],
                                  ['eqdf', 'qcpr']])


if __name__ == '__main__':
    unittest.main()
| LeonardCohen/coding | py/group_shifted_strings.py | Python | gpl-2.0 | 947 |
from flask import Flask
def create_app(config_filename):
    """Application factory: build, configure and return the Flask app.

    :param config_filename: import path of the configuration object passed
        to ``app.config.from_object``
    """
    app = Flask(__name__)
    app.config.from_object(config_filename)
    # Imports are deferred until the app exists — presumably to avoid
    # circular imports, since models/views typically import the app or db.
    from models import db
    db.init_app(app)
    from views import api_bp
    app.register_blueprint(api_bp, url_prefix='/api')
    return app
| abacuspix/NFV_project | Build_Flask_API/restful_python_section_07/api/app.py | Python | mit | 277 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import datetime
import asyncio
import aiohttp
import numpy as np
from ... import core
from ... import util
class Flukso(core.plugin.Plugin):
    """
    Retrieve data from a Fluksometer
    """
    def initialize(self):
        # IP address of the Fluksometer, exposed as a (private) setting.
        core.states.add('flukso/settings/ip', value='192.168.1.1', config={'type': 'string', 'quantity':'', 'unit':'','label':'', 'description':'', 'private':True})
        # Kick off the periodic polling task on the plugin's event loop.
        self._loop.create_task(self.schedule_retrieve_data())
        logging.debug('Flukso plugin Initialized')
    async def schedule_retrieve_data(self):
        # Poll once per minute, aligned to the start of the minute.
        while self.active:
            # timestamps
            dt_when = (datetime.datetime.utcnow() + datetime.timedelta(minutes=1)).replace(second=0,microsecond=0)
            ts_when = util.time.timestamp(dt_when)
            await self.retrieve_data()
            # sleep until the next call
            await asyncio.sleep(util.time.seconds_until(ts_when))
    async def retrieve_data(self):
        # Fetch the last minute of readings for every configured sensor.
        for sensor in core.components.find(type='fluksosensor'):
            try:
                url = 'http://{}:8080/sensor/{}?version=1.0&unit={}&interval=minute'.format(core.states['flukso/settings/ip'].value,sensor.config['id'],sensor.config['unit'])
                async with aiohttp.ClientSession() as session:
                    async with session.get(url) as resp:
                        response = await resp.text()
                        # SECURITY: eval() on an HTTP response body executes
                        # arbitrary code if the device or network is
                        # compromised — consider json.loads with a "nan"
                        # substitution instead.
                        data = eval(response.replace('"nan"','float(\'nan\')'))
                        timestamp = data[0][0]
                        # Average the minute's samples, ignoring NaNs.
                        value = np.round(np.nanmean( [row[1] for row in data] ),3)
                        if np.isnan(value):
                            value = 0
                        # FIXME the values are shifted by one minute or should be plotted with backward steps
                        await sensor.states['value'].set_async( value )
                        #core.event.fire('measurements_add',{'state':sensor.states['value'],'value':value,'timestamp':timestamp})
                        logging.debug('Flukso sensor {} updated'.format(sensor.path))
            except Exception as e:
                logging.error('Could not load data from Flukso sensor {}: {}'.format(sensor.path,e))
class Fluksosensor(core.component.Component):
    # Component describing one Fluksometer sensor channel.
    # Per-sensor configuration: remote sensor id/token, sensor type and
    # the measurement unit requested from the device API.
    default_config = {
        'id': '',
        'token': '',
        'type': '',
        'unit': 'watt',
    }
    # One linked state holding the latest reading; timestampdelta shifts
    # stored values back one minute (readings describe the previous minute).
    linked_states = {
        'value': {
            'default_config': {},
            'fixed_config': {'datatype': 'number','timestampdelta':-60},
        },
    }
core.components.register(Fluksosensor)
| BrechtBa/homeconn | homecon/plugins/flukso/__init__.py | Python | gpl-3.0 | 2,862 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Copyright 2012-2015 OpenBroadcaster, Inc.
This file is part of OpenBroadcaster Player.
OpenBroadcaster Player is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenBroadcaster Player is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with OpenBroadcaster Player. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import
import obplayer
from .audiolog import Oboff_air_AudioLog
def init():
    # Create the module-wide audio logger singleton on the obplayer namespace.
    obplayer.off_air_AudioLog = Oboff_air_AudioLog()
def quit():
    # Stop the audio logger, but only if init() ever created it.
    audiolog = getattr(obplayer, 'off_air_AudioLog', None)
    if audiolog is not None:
        audiolog.stop()
| openbroadcaster/obplayer | obplayer/offair_audiolog/__init__.py | Python | agpl-3.0 | 1,098 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.