Dataset schema (one row per source file; "nullable" columns may be null):

| column | dtype | range / length |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
=== file: assets/Poser/Reality/Runtime/Libraries/light/Reality/SingleIBLs/LineSingleH/Large/LineSingleH_Large_00.py
=== repo: Red54/reality @ 510d4f5fde2f4c5535482f1ea199f914102b8a2a | license: BSD-3-Clause
=== hexsha: 4a1fc681e95aa865f1229d3b611a0ed6145ffd67 | size: 481 | ext: py | lang: Python | stars: null | issues: null | forks: null
#
# Copyright (c) Pret-a-3D/Paolo Ciccone. All rights reserved.
# Modified by Fuzzy70/Lee Furssedonn with kind permission from Paolo Ciccone
#
from Reality_services import *
from Reality import *
# To customize this script all you need to do is to
# change the following variable
Re_sIBL_Map = ":Runtime:Textures:Reality:SingleIBLs:LineSingleH:LineSingleH_Large_00.ibl"
# Set the IBL Map
Reality.Scene().setIBLImage(ReResolvePoserPath(Re_sIBL_Map).encode("utf8"))
=== stats: avg_line_length 32.066667 | max_line_length 90 | alphanum_fraction 0.767152
=== file: electrum/scripts/get_history.py
=== repo: asuka431/electrum @ 144b19660451bbbdc8314ac8fccd6e79fdf3a4b1 | license: MIT
=== hexsha: 4a1fc6e90cc453bd214d46a4ad82a8f7d553f191 | size: 786 | ext: py | lang: Python | stars: null | issues: null | forks: null
#!/usr/bin/env python3
import sys
import asyncio
from electrum import bitcoin
from electrum.network import Network
from electrum.util import json_encode, print_msg, create_and_start_event_loop, log_exceptions
from electrum.simple_config import SimpleConfig
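# Overview: this script prints the on-chain history of a single address by
# converting it to an Electrum script hash and querying the network for it.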
try:
addr = sys.argv[1]
except Exception:
print("usage: get_history <baricoin_address>")
sys.exit(1)
config = SimpleConfig()
loop, stopping_fut, loop_thread = create_and_start_event_loop()
network = Network(config)
network.start()
@log_exceptions
async def f():
try:
sh = bitcoin.address_to_scripthash(addr)
hist = await network.get_history_for_scripthash(sh)
print_msg(json_encode(hist))
finally:
stopping_fut.set_result(1)
asyncio.run_coroutine_threadsafe(f(), loop)
=== stats: avg_line_length 23.117647 | max_line_length 93 | alphanum_fraction 0.755725
=== file: tools/space.py
=== repo: zjy8006/MonthlyRunoffForecastByAutoReg @ 661fcb5dcdfbbb2ec6861e1668a035b50e69f7c2 | license: MIT
=== hexsha: 4a1fc78bc864df2911f9bb2226904ef8c569a363 | size: 31,257 | ext: py | lang: Python
=== stars: 2 (2020-05-18T06:45:04.000Z to 2021-05-18T06:38:23.000Z) | issues: null | forks: 1 (2020-01-17T02:56:18.000Z to 2020-01-17T02:56:18.000Z)
import numbers
import numpy as np
import yaml
from scipy.stats.distributions import randint
from scipy.stats.distributions import rv_discrete
from scipy.stats.distributions import uniform
from sklearn.utils import check_random_state
from sklearn.utils.fixes import sp_version
from .transformers import CategoricalEncoder
from .transformers import StringEncoder
from .transformers import Normalize
from .transformers import Identity
from .transformers import LogN
from .transformers import Pipeline
# helper class to be able to print [1, ..., 4] instead of [1, '...', 4]
class _Ellipsis:
def __repr__(self):
return '...'
def check_dimension(dimension, transform=None):
"""Turn a provided dimension description into a dimension object.
Checks that the provided dimension falls into one of the
supported types. For a list of supported types, look at
the documentation of ``dimension`` below.
If ``dimension`` is already a ``Dimension`` instance, return it.
Parameters
----------
dimension : Dimension
Search space Dimension.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
transform : "identity", "normalize", "string", "onehot" optional
- For `Categorical` dimensions, the following transformations are
supported.
- "onehot" (default) one-hot transformation of the original space.
- "string" string transformation of the original space.
- "identity" same as the original space.
- For `Real` and `Integer` dimensions, the following transformations
are supported.
- "identity", (default) the transformed space is the same as the
original space.
- "normalize", the transformed space is scaled to be between 0 and 1.
Returns
-------
dimension : Dimension
Dimension instance.
"""
if isinstance(dimension, Dimension):
return dimension
if not isinstance(dimension, (list, tuple, np.ndarray)):
raise ValueError("Dimension has to be a list or tuple.")
# A `Dimension` described by a single value is assumed to be
# a `Categorical` dimension. This can be used in `BayesSearchCV`
# to define subspaces that fix one value, e.g. to choose the
# model type, see "sklearn-gridsearchcv-replacement.py"
# for examples.
if len(dimension) == 1:
return Categorical(dimension, transform=transform)
if len(dimension) == 2:
if any([isinstance(d, (str, bool)) or isinstance(d, np.bool_)
for d in dimension]):
return Categorical(dimension, transform=transform)
elif all([isinstance(dim, numbers.Integral) for dim in dimension]):
return Integer(*dimension, transform=transform)
elif any([isinstance(dim, numbers.Real) for dim in dimension]):
return Real(*dimension, transform=transform)
else:
raise ValueError("Invalid dimension {}. Read the documentation for"
" supported types.".format(dimension))
if len(dimension) == 3:
if (any([isinstance(dim, int) for dim in dimension[:2]]) and
dimension[2] in ["uniform", "log-uniform"]):
return Integer(*dimension, transform=transform)
elif (any([isinstance(dim, (float, int)) for dim in dimension[:2]]) and
dimension[2] in ["uniform", "log-uniform"]):
return Real(*dimension, transform=transform)
else:
return Categorical(dimension, transform=transform)
if len(dimension) == 4:
if (any([isinstance(dim, int) for dim in dimension[:2]]) and
dimension[2] == "log-uniform" and isinstance(dimension[3],
int)):
return Integer(*dimension, transform=transform)
elif (any([isinstance(dim, (float, int)) for dim in dimension[:2]]) and
dimension[2] == "log-uniform" and isinstance(dimension[3], int)):
return Real(*dimension, transform=transform)
if len(dimension) > 3:
return Categorical(dimension, transform=transform)
raise ValueError("Invalid dimension {}. Read the documentation for "
"supported types.".format(dimension))
class Dimension(object):
"""Base class for search space dimensions."""
prior = None
def rvs(self, n_samples=1, random_state=None):
"""Draw random samples.
Parameters
----------
n_samples : int or None
The number of samples to be drawn.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
"""
rng = check_random_state(random_state)
samples = self._rvs.rvs(size=n_samples, random_state=rng)
return self.inverse_transform(samples)
def transform(self, X):
"""Transform samples form the original space to a warped space."""
return self.transformer.transform(X)
def inverse_transform(self, Xt):
"""Inverse transform samples from the warped space back into the
original space.
"""
return self.transformer.inverse_transform(Xt)
@property
def size(self):
return 1
@property
def transformed_size(self):
return 1
@property
def bounds(self):
raise NotImplementedError
@property
def transformed_bounds(self):
raise NotImplementedError
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if isinstance(value, str) or value is None:
self._name = value
else:
raise ValueError("Dimension's name must be either string or None.")
def _uniform_inclusive(loc=0.0, scale=1.0):
# like scipy.stats.distributions but inclusive of `high`
# XXX scale + 1. might not actually be a float after scale if
# XXX scale is very large.
return uniform(loc=loc, scale=np.nextafter(scale, scale + 1.))
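# For example, _uniform_inclusive(0., 1.) is intended to make the upper
# endpoint 1.0 itself reachable, whereas scipy's uniform(0, 1) draws from
# the half-open interval [0, 1).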
class Real(Dimension):
"""Search space dimension that can take on any real value.
Parameters
----------
low : float
Lower bound (inclusive).
high : float
Upper bound (inclusive).
prior : "uniform" or "log-uniform", default="uniform"
Distribution to use when sampling random points for this dimension.
- If `"uniform"`, points are sampled uniformly between the lower
and upper bounds.
- If `"log-uniform"`, points are sampled uniformly between
`log(lower, base)` and `log(upper, base)` where log
has base `base`.
base : int
The logarithmic base to use for a log-uniform prior.
- Default 10, otherwise commonly 2.
transform : "identity", "normalize", optional
The following transformations are supported.
- "identity", (default) the transformed space is the same as the
original space.
- "normalize", the transformed space is scaled to be between
0 and 1.
name : str or None
Name associated with the dimension, e.g., "learning rate".
dtype : str or dtype, default=np.float
float type which will be used in inverse_transform,
can be float.
"""
def __init__(self, low, high, prior="uniform", base=10, transform=None,
name=None, dtype=np.float):
if high <= low:
raise ValueError("the lower bound {} has to be less than the"
" upper bound {}".format(low, high))
self.low = low
self.high = high
self.prior = prior
self.base = base
self.log_base = np.log10(base)
self.name = name
self.dtype = dtype
if isinstance(self.dtype, str) and self.dtype\
not in ['float', 'float16', 'float32', 'float64']:
raise ValueError("dtype must be 'float', 'float16', 'float32'"
"or 'float64'"
" got {}".format(self.dtype))
elif isinstance(self.dtype, type) and self.dtype\
not in [float, np.float, np.float16, np.float32, np.float64]:
raise ValueError("dtype must be float, np.float"
" got {}".format(self.dtype))
if transform is None:
transform = "identity"
self.transform_ = transform
if self.transform_ not in ["normalize", "identity"]:
raise ValueError("transform should be 'normalize' or 'identity'"
" got {}".format(self.transform_))
# Define _rvs and transformer spaces.
# XXX: The _rvs is for sampling in the transformed space.
# The rvs on Dimension calls inverse_transform on the points sampled
# using _rvs
if self.transform_ == "normalize":
# set upper bound to next float after 1. to make the numbers
# inclusive of upper edge
self._rvs = _uniform_inclusive(0., 1.)
if self.prior == "uniform":
self.transformer = Pipeline(
[Identity(), Normalize(low, high)])
else:
self.transformer = Pipeline(
[LogN(self.base),
Normalize(np.log10(low) / self.log_base,
np.log10(high) / self.log_base)]
)
else:
if self.prior == "uniform":
self._rvs = _uniform_inclusive(self.low, self.high - self.low)
self.transformer = Identity()
else:
self._rvs = _uniform_inclusive(
np.log10(self.low) / self.log_base,
np.log10(self.high) / self.log_base -
np.log10(self.low) / self.log_base)
self.transformer = LogN(self.base)
def __eq__(self, other):
return (type(self) is type(other) and
np.allclose([self.low], [other.low]) and
np.allclose([self.high], [other.high]) and
self.prior == other.prior and
self.transform_ == other.transform_)
def __repr__(self):
return "Real(low={}, high={}, prior='{}', transform='{}')".format(
self.low, self.high, self.prior, self.transform_)
def inverse_transform(self, Xt):
"""Inverse transform samples from the warped space back into the
original space.
"""
inv_transform = super(Real, self).inverse_transform(Xt)
if isinstance(inv_transform, list):
inv_transform = np.array(inv_transform)
inv_transform = np.clip(inv_transform,
self.low, self.high).astype(self.dtype)
if self.dtype == float or self.dtype == 'float':
# necessary, otherwise the type is converted to a numpy type
            return getattr(inv_transform, "tolist", lambda: inv_transform)()
else:
return inv_transform
@property
def bounds(self):
return (self.low, self.high)
def __contains__(self, point):
if isinstance(point, list):
point = np.array(point)
return self.low <= point <= self.high
@property
def transformed_bounds(self):
if self.transform_ == "normalize":
return 0.0, 1.0
else:
if self.prior == "uniform":
return self.low, self.high
else:
return np.log10(self.low), np.log10(self.high)
def distance(self, a, b):
"""Compute distance between point `a` and `b`.
Parameters
----------
a : float
First point.
b : float
Second point.
"""
if not (a in self and b in self):
raise RuntimeError("Can only compute distance for values within "
"the space, not %s and %s." % (a, b))
return abs(a - b)
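# A minimal usage sketch for Real (hypothetical bounds and values):
#   r = Real(1e-4, 1e-1, prior="log-uniform", transform="normalize")
#   r.transform([1e-4, 1e-1])        # approximately [0.0, 1.0] after log + normalize
#   r.inverse_transform([0.0, 1.0])  # back to approximately [0.0001, 0.1]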
class Integer(Dimension):
"""Search space dimension that can take on integer values.
Parameters
----------
low : int
Lower bound (inclusive).
high : int
Upper bound (inclusive).
prior : "uniform" or "log-uniform", default="uniform"
Distribution to use when sampling random integers for
this dimension.
- If `"uniform"`, intgers are sampled uniformly between the lower
and upper bounds.
- If `"log-uniform"`, intgers are sampled uniformly between
`log(lower, base)` and `log(upper, base)` where log
has base `base`.
base : int
The logarithmic base to use for a log-uniform prior.
- Default 10, otherwise commonly 2.
transform : "identity", "normalize", optional
The following transformations are supported.
- "identity", (default) the transformed space is the same as the
original space.
- "normalize", the transformed space is scaled to be between
0 and 1.
name : str or None
Name associated with dimension, e.g., "number of trees".
dtype : str or dtype, default=np.int64
integer type which will be used in inverse_transform,
can be int, np.int16, np.uint32, np.int32, np.int64 (default).
When set to int, `inverse_transform` returns a list instead of
a numpy array
"""
def __init__(self, low, high, prior="uniform", base=10, transform=None,
name=None, dtype=np.int64):
if high <= low:
raise ValueError("the lower bound {} has to be less than the"
" upper bound {}".format(low, high))
self.low = low
self.high = high
self.prior = prior
self.base = base
self.log_base = np.log10(base)
self.name = name
self.dtype = dtype
if isinstance(self.dtype, str) and self.dtype\
not in ['int', 'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64']:
raise ValueError("dtype must be 'int', 'int8', 'int16',"
"'int32', 'int64', 'uint8',"
"'uint16', 'uint32', or"
"'uint64', but got {}".format(self.dtype))
elif isinstance(self.dtype, type) and self.dtype\
not in [int, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64]:
raise ValueError("dtype must be 'int', 'np.int8', 'np.int16',"
"'np.int32', 'np.int64', 'np.uint8',"
"'np.uint16', 'np.uint32', or"
"'np.uint64', but got {}".format(self.dtype))
if transform is None:
transform = "identity"
self.transform_ = transform
if transform not in ["normalize", "identity"]:
raise ValueError("transform should be 'normalize' or 'identity'"
" got {}".format(self.transform_))
if self.transform_ == "normalize":
self._rvs = _uniform_inclusive(0.0, 1.0)
if self.prior == "uniform":
self.transformer = Pipeline(
[Identity(), Normalize(low, high, is_int=True)])
else:
self.transformer = Pipeline(
[LogN(self.base),
Normalize(np.log10(low) / self.log_base,
np.log10(high) / self.log_base)]
)
else:
if self.prior == "uniform":
self._rvs = randint(self.low, self.high + 1)
self.transformer = Identity()
else:
self._rvs = _uniform_inclusive(
np.log10(self.low) / self.log_base,
np.log10(self.high) / self.log_base -
np.log10(self.low) / self.log_base)
self.transformer = LogN(self.base)
def __eq__(self, other):
return (type(self) is type(other) and
np.allclose([self.low], [other.low]) and
np.allclose([self.high], [other.high]))
def __repr__(self):
return "Integer(low={}, high={}, prior='{}', transform='{}')".format(
self.low, self.high, self.prior, self.transform_)
def inverse_transform(self, Xt):
"""Inverse transform samples from the warped space back into the
original space.
"""
# The concatenation of all transformed dimensions makes Xt to be
# of type float, hence the required cast back to int.
inv_transform = super(Integer, self).inverse_transform(Xt)
if isinstance(inv_transform, list):
inv_transform = np.array(inv_transform)
if self.dtype == int or self.dtype == 'int':
# necessary, otherwise the type is converted to a numpy type
            rounded = np.round(inv_transform).astype(self.dtype)
            return getattr(rounded, "tolist", lambda: rounded)()
else:
return np.round(inv_transform).astype(self.dtype)
@property
def bounds(self):
return (self.low, self.high)
def __contains__(self, point):
if isinstance(point, list):
point = np.array(point)
return self.low <= point <= self.high
@property
def transformed_bounds(self):
if self.transform_ == "normalize":
return 0, 1
else:
return (self.low, self.high)
def distance(self, a, b):
"""Compute distance between point `a` and `b`.
Parameters
----------
a : int
First point.
b : int
Second point.
"""
if not (a in self and b in self):
raise RuntimeError("Can only compute distance for values within "
"the space, not %s and %s." % (a, b))
return abs(a - b)
class Categorical(Dimension):
"""Search space dimension that can take on categorical values.
Parameters
----------
categories : list, shape=(n_categories,)
Sequence of possible categories.
prior : list, shape=(categories,), default=None
Prior probabilities for each category. By default all categories
are equally likely.
transform : "onehot", "string", "identity", default="onehot"
- "identity", the transformed space is the same as the original
space.
- "string", the transformed space is a string encoded
representation of the original space.
- "onehot", the transformed space is a one-hot encoded
representation of the original space.
name : str or None
Name associated with dimension, e.g., "colors".
"""
def __init__(self, categories, prior=None, transform=None, name=None):
self.categories = tuple(categories)
self.name = name
if transform is None:
transform = "onehot"
self.transform_ = transform
if transform not in ["identity", "onehot", "string"]:
raise ValueError("Expected transform to be 'identity', 'string' or"
"'onehot' got {}".format(transform))
if transform == "onehot":
self.transformer = CategoricalEncoder()
self.transformer.fit(self.categories)
elif transform == "string":
self.transformer = StringEncoder()
self.transformer.fit(self.categories)
else:
self.transformer = Identity()
self.transformer.fit(self.categories)
self.prior = prior
if prior is None:
self.prior_ = np.tile(1. / len(self.categories),
len(self.categories))
else:
self.prior_ = prior
# XXX check that sum(prior) == 1
self._rvs = rv_discrete(
values=(range(len(self.categories)), self.prior_)
)
def __eq__(self, other):
return (type(self) is type(other) and
self.categories == other.categories and
np.allclose(self.prior_, other.prior_))
def __repr__(self):
if len(self.categories) > 7:
cats = self.categories[:3] + (_Ellipsis(),) + self.categories[-3:]
else:
cats = self.categories
if self.prior is not None and len(self.prior) > 7:
prior = self.prior[:3] + [_Ellipsis()] + self.prior[-3:]
else:
prior = self.prior
return "Categorical(categories={}, prior={})".format(cats, prior)
def rvs(self, n_samples=None, random_state=None):
choices = self._rvs.rvs(size=n_samples, random_state=random_state)
if isinstance(choices, numbers.Integral):
return self.categories[choices]
else:
return [self.categories[c] for c in choices]
@property
def transformed_size(self):
if self.transform_ == "onehot":
size = len(self.categories)
# when len(categories) == 2, CategoricalEncoder outputs a
# single value
return size if size != 2 else 1
return 1
@property
def bounds(self):
return self.categories
def __contains__(self, point):
return point in self.categories
@property
def transformed_bounds(self):
if self.transformed_size == 1:
return (0.0, 1.0)
else:
return [(0.0, 1.0) for i in range(self.transformed_size)]
def distance(self, a, b):
"""Compute distance between category `a` and `b`.
As categories have no order the distance between two points is one
if a != b and zero otherwise.
Parameters
----------
a : category
First category.
b : category
Second category.
"""
if not (a in self and b in self):
raise RuntimeError("Can only compute distance for values within"
" the space, not {} and {}.".format(a, b))
return 1 if a != b else 0
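# A minimal usage sketch for Categorical (hypothetical categories):
#   c = Categorical(["red", "green", "blue"])    # default transform="onehot"
#   c.transform(["red"])                         # a one-hot encoded row
#   c.inverse_transform(c.transform(["green"]))  # -> ['green']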
class Space(object):
"""Initialize a search space from given specifications.
Parameters
----------
dimensions : list, shape=(n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
.. note::
The upper and lower bounds are inclusive for `Integer`
dimensions.
"""
def __init__(self, dimensions):
self.dimensions = [check_dimension(dim) for dim in dimensions]
def __eq__(self, other):
return all([a == b for a, b in zip(self.dimensions, other.dimensions)])
def __repr__(self):
if len(self.dimensions) > 31:
dims = self.dimensions[:15] + [_Ellipsis()] + self.dimensions[-15:]
else:
dims = self.dimensions
return "Space([{}])".format(',\n '.join(map(str, dims)))
def __iter__(self):
return iter(self.dimensions)
@property
def is_real(self):
"""
Returns true if all dimensions are Real
"""
return all([isinstance(dim, Real) for dim in self.dimensions])
@classmethod
def from_yaml(cls, yml_path, namespace=None):
"""Create Space from yaml configuration file
Parameters
----------
yml_path : str
Full path to yaml configuration file, example YaML below:
Space:
- Integer:
low: -5
high: 5
- Categorical:
categories:
- a
- b
- Real:
low: 1.0
high: 5.0
prior: log-uniform
namespace : str, default=None
Namespace within configuration file to use, will use first
namespace if not provided
Returns
-------
space : Space
Instantiated Space object
"""
with open(yml_path, 'rb') as f:
config = yaml.safe_load(f)
dimension_classes = {'real': Real,
'integer': Integer,
'categorical': Categorical}
# Extract space options for configuration file
if isinstance(config, dict):
if namespace is None:
options = next(iter(config.values()))
else:
options = config[namespace]
elif isinstance(config, list):
options = config
else:
raise TypeError('YaML does not specify a list or dictionary')
# Populate list with Dimension objects
dimensions = []
for option in options:
key = next(iter(option.keys()))
# Make configuration case insensitive
dimension_class = key.lower()
values = {k.lower(): v for k, v in option[key].items()}
if dimension_class in dimension_classes:
# Instantiate Dimension subclass and add it to the list
dimension = dimension_classes[dimension_class](**values)
dimensions.append(dimension)
space = cls(dimensions=dimensions)
return space
def rvs(self, n_samples=1, random_state=None):
"""Draw random samples.
The samples are in the original space. They need to be transformed
before being passed to a model or minimizer by `space.transform()`.
Parameters
----------
n_samples : int, default=1
Number of samples to be drawn from the space.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
Returns
-------
points : list of lists, shape=(n_points, n_dims)
Points sampled from the space.
"""
rng = check_random_state(random_state)
# Draw
columns = []
for dim in self.dimensions:
if sp_version < (0, 16):
columns.append(dim.rvs(n_samples=n_samples))
else:
columns.append(dim.rvs(n_samples=n_samples, random_state=rng))
# Transpose
rows = []
for i in range(n_samples):
r = []
for j in range(self.n_dims):
r.append(columns[j][i])
rows.append(r)
return rows
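    # A minimal round-trip sketch (hypothetical dimensions):
    #   space = Space([(1, 10), (0.0, 1.0), ("a", "b")])
    #   X = space.rvs(n_samples=5, random_state=0)
    #   Xt = space.transform(X)           # warped, numeric representation
    #   X2 = space.inverse_transform(Xt)  # back to the original space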
def transform(self, X):
"""Transform samples from the original space into a warped space.
Note: this transformation is expected to be used to project samples
into a suitable space for numerical optimization.
Parameters
----------
X : list of lists, shape=(n_samples, n_dims)
The samples to transform.
Returns
-------
Xt : array of floats, shape=(n_samples, transformed_n_dims)
The transformed samples.
"""
# Pack by dimension
columns = []
for dim in self.dimensions:
columns.append([])
for i in range(len(X)):
for j in range(self.n_dims):
columns[j].append(X[i][j])
# Transform
for j in range(self.n_dims):
columns[j] = self.dimensions[j].transform(columns[j])
# Repack as an array
Xt = np.hstack([np.asarray(c).reshape((len(X), -1)) for c in columns])
return Xt
def inverse_transform(self, Xt):
"""Inverse transform samples from the warped space back to the
original space.
Parameters
----------
Xt : array of floats, shape=(n_samples, transformed_n_dims)
The samples to inverse transform.
Returns
-------
X : list of lists, shape=(n_samples, n_dims)
The original samples.
"""
# Inverse transform
columns = []
start = 0
for j in range(self.n_dims):
dim = self.dimensions[j]
offset = dim.transformed_size
if offset == 1:
columns.append(dim.inverse_transform(Xt[:, start]))
else:
columns.append(
dim.inverse_transform(Xt[:, start:start + offset]))
start += offset
# Transpose
rows = []
for i in range(len(Xt)):
r = []
for j in range(self.n_dims):
r.append(columns[j][i])
rows.append(r)
return rows
@property
def n_dims(self):
"""The dimensionality of the original space."""
return len(self.dimensions)
@property
def transformed_n_dims(self):
"""The dimensionality of the warped space."""
return sum([dim.transformed_size for dim in self.dimensions])
@property
def bounds(self):
"""The dimension bounds, in the original space."""
b = []
for dim in self.dimensions:
if dim.size == 1:
b.append(dim.bounds)
else:
b.extend(dim.bounds)
return b
def __contains__(self, point):
"""Check that `point` is within the bounds of the space."""
for component, dim in zip(point, self.dimensions):
if component not in dim:
return False
return True
@property
def transformed_bounds(self):
"""The dimension bounds, in the warped space."""
b = []
for dim in self.dimensions:
if dim.transformed_size == 1:
b.append(dim.transformed_bounds)
else:
b.extend(dim.transformed_bounds)
return b
@property
def is_categorical(self):
"""Space contains exclusively categorical dimensions"""
return all([isinstance(dim, Categorical) for dim in self.dimensions])
@property
def is_partly_categorical(self):
"""Space contains any categorical dimensions"""
return any([isinstance(dim, Categorical) for dim in self.dimensions])
def distance(self, point_a, point_b):
"""Compute distance between two points in this space.
Parameters
----------
point_a : array
First point.
point_b : array
Second point.
"""
distance = 0.
for a, b, dim in zip(point_a, point_b, self.dimensions):
distance += dim.distance(a, b)
        return distance

=== stats: avg_line_length 35.886338 | max_line_length 79 | alphanum_fraction 0.566657
=== file: sympy/functions/elementary/tests/test_exponential.py
=== repo: CameronKing/sympy @ 3295b02c617a10ea8db0a070356cc0ba5a3b5121 | license: BSD-3-Clause
=== hexsha: 4a1fc7d60f3ddb63481c3d7e849e37f2277cb291 | size: 16,338 | ext: py | lang: Python | stars: null | issues: null | forks: null
from sympy import (
symbols, log, ln, Float, nan, oo, zoo, I, pi, E, exp, Symbol,
LambertW, sqrt, Rational, expand_log, S, sign, conjugate, refine,
sin, cos, sinh, cosh, tanh, exp_polar, re, Function, simplify,
AccumBounds, MatrixSymbol, Pow)
from sympy.abc import x, y, z
from sympy.core.expr import unchanged
from sympy.core.function import ArgumentIndexError
from sympy.utilities.pytest import raises
def test_exp_values():
k = Symbol('k', integer=True)
assert exp(nan) == nan
assert exp(oo) == oo
assert exp(-oo) == 0
assert exp(0) == 1
assert exp(1) == E
assert exp(-1 + x).as_base_exp() == (S.Exp1, x - 1)
assert exp(1 + x).as_base_exp() == (S.Exp1, x + 1)
assert exp(pi*I/2) == I
assert exp(pi*I) == -1
assert exp(3*pi*I/2) == -I
assert exp(2*pi*I) == 1
assert refine(exp(pi*I*2*k)) == 1
assert refine(exp(pi*I*2*(k + Rational(1, 2)))) == -1
assert refine(exp(pi*I*2*(k + Rational(1, 4)))) == I
assert refine(exp(pi*I*2*(k + Rational(3, 4)))) == -I
assert exp(log(x)) == x
assert exp(2*log(x)) == x**2
assert exp(pi*log(x)) == x**pi
assert exp(17*log(x) + E*log(y)) == x**17 * y**E
assert exp(x*log(x)) != x**x
assert exp(sin(x)*log(x)) != x
assert exp(3*log(x) + oo*x) == exp(oo*x) * x**3
assert exp(4*log(x)*log(y) + 3*log(x)) == x**3 * exp(4*log(x)*log(y))
assert exp(-oo, evaluate=False).is_finite is True
assert exp(oo, evaluate=False).is_finite is False
def test_exp_log():
x = Symbol("x", real=True)
assert log(exp(x)) == x
assert exp(log(x)) == x
assert log(x).inverse() == exp
assert exp(x).inverse() == log
y = Symbol("y", polar=True)
assert log(exp_polar(z)) == z
assert exp(log(y)) == y
def test_exp_expand():
e = exp(log(Rational(2))*(1 + x) - log(Rational(2))*x)
assert e.expand() == 2
assert exp(x + y) != exp(x)*exp(y)
assert exp(x + y).expand() == exp(x)*exp(y)
def test_exp__as_base_exp():
assert exp(x).as_base_exp() == (E, x)
assert exp(2*x).as_base_exp() == (E, 2*x)
assert exp(x*y).as_base_exp() == (E, x*y)
assert exp(-x).as_base_exp() == (E, -x)
# Pow( *expr.as_base_exp() ) == expr invariant should hold
assert E**x == exp(x)
assert E**(2*x) == exp(2*x)
assert E**(x*y) == exp(x*y)
assert exp(x).base is S.Exp1
assert exp(x).exp == x
def test_exp_infinity():
assert exp(I*y) != nan
assert refine(exp(I*oo)) == nan
assert refine(exp(-I*oo)) == nan
assert exp(y*I*oo) != nan
assert exp(zoo) == nan
def test_exp_subs():
x = Symbol('x')
e = (exp(3*log(x), evaluate=False)) # evaluates to x**3
assert e.subs(x**3, y**3) == e
assert e.subs(x**2, 5) == e
assert (x**3).subs(x**2, y) != y**(3/S(2))
assert exp(exp(x) + exp(x**2)).subs(exp(exp(x)), y) == y * exp(exp(x**2))
assert exp(x).subs(E, y) == y**x
x = symbols('x', real=True)
assert exp(5*x).subs(exp(7*x), y) == y**Rational(5, 7)
assert exp(2*x + 7).subs(exp(3*x), y) == y**Rational(2, 3) * exp(7)
x = symbols('x', positive=True)
assert exp(3*log(x)).subs(x**2, y) == y**Rational(3, 2)
# differentiate between E and exp
assert exp(exp(x + E)).subs(exp, 3) == 3**(3**(x + E))
assert exp(exp(x + E)).subs(E, 3) == 3**(3**(x + 3))
assert exp(3).subs(E, sin) == sin(3)
def test_exp_conjugate():
assert conjugate(exp(x)) == exp(conjugate(x))
def test_exp_rewrite():
from sympy.concrete.summations import Sum
assert exp(x).rewrite(sin) == sinh(x) + cosh(x)
assert exp(x*I).rewrite(cos) == cos(x) + I*sin(x)
assert exp(1).rewrite(cos) == sinh(1) + cosh(1)
assert exp(1).rewrite(sin) == sinh(1) + cosh(1)
assert exp(1).rewrite(sin) == sinh(1) + cosh(1)
assert exp(x).rewrite(tanh) == (1 + tanh(x/2))/(1 - tanh(x/2))
assert exp(pi*I/4).rewrite(sqrt) == sqrt(2)/2 + sqrt(2)*I/2
assert exp(pi*I/3).rewrite(sqrt) == S(1)/2 + sqrt(3)*I/2
assert exp(x*log(y)).rewrite(Pow) == y**x
assert exp(log(x)*log(y)).rewrite(Pow) in [x**log(y), y**log(x)]
assert exp(log(log(x))*y).rewrite(Pow) == log(x)**y
n = Symbol('n', integer=True)
assert Sum((exp(pi*I/2)/2)**n, (n, 0, oo)).rewrite(sqrt).doit() == S(4)/5 + 2*I/5
assert Sum((exp(pi*I/4)/2)**n, (n, 0, oo)).rewrite(sqrt).doit() == 1/(1 - sqrt(2)*(1 + I)/4)
assert Sum((exp(pi*I/3)/2)**n, (n, 0, oo)).rewrite(sqrt).doit() == 1/(S(3)/4 - sqrt(3)*I/4)
def test_exp_leading_term():
assert exp(x).as_leading_term(x) == 1
assert exp(1/x).as_leading_term(x) == exp(1/x)
assert exp(2 + x).as_leading_term(x) == exp(2)
def test_exp_taylor_term():
x = symbols('x')
assert exp(x).taylor_term(1, x) == x
assert exp(x).taylor_term(3, x) == x**3/6
assert exp(x).taylor_term(4, x) == x**4/24
assert exp(x).taylor_term(-1, x) == S.Zero
def test_exp_MatrixSymbol():
A = MatrixSymbol("A", 2, 2)
assert exp(A).has(exp)
def test_exp_fdiff():
x = Symbol('x')
raises(ArgumentIndexError, lambda: exp(x).fdiff(2))
def test_log_values():
assert log(nan) == nan
assert log(oo) == oo
assert log(-oo) == oo
assert log(zoo) == zoo
assert log(-zoo) == zoo
assert log(0) == zoo
assert log(1) == 0
assert log(-1) == I*pi
assert log(E) == 1
assert log(-E).expand() == 1 + I*pi
assert unchanged(log, pi)
assert log(-pi).expand() == log(pi) + I*pi
assert unchanged(log, 17)
assert log(-17) == log(17) + I*pi
assert log(I) == I*pi/2
assert log(-I) == -I*pi/2
assert log(17*I) == I*pi/2 + log(17)
assert log(-17*I).expand() == -I*pi/2 + log(17)
assert log(oo*I) == oo
assert log(-oo*I) == oo
assert log(0, 2) == zoo
assert log(0, 5) == zoo
assert exp(-log(3))**(-1) == 3
assert log(S.Half) == -log(2)
assert log(2*3).func is log
assert log(2*3**2).func is log
def test_log_base():
assert log(1, 2) == 0
assert log(2, 2) == 1
assert log(3, 2) == log(3)/log(2)
assert log(6, 2) == 1 + log(3)/log(2)
assert log(6, 3) == 1 + log(2)/log(3)
assert log(2**3, 2) == 3
assert log(3**3, 3) == 3
assert log(5, 1) == zoo
assert log(1, 1) == nan
assert log(Rational(2, 3), 10) == log(S(2)/3)/log(10)
assert log(Rational(2, 3), Rational(1, 3)) == -log(2)/log(3) + 1
assert log(Rational(2, 3), Rational(2, 5)) == \
log(S(2)/3)/log(S(2)/5)
# issue 17148
assert log(S(8)/3, 2) == -log(3)/log(2) + 3
def test_log_symbolic():
assert log(x, exp(1)) == log(x)
assert log(exp(x)) != x
assert log(x, exp(1)) == log(x)
assert log(x*y) != log(x) + log(y)
assert log(x/y).expand() != log(x) - log(y)
assert log(x/y).expand(force=True) == log(x) - log(y)
assert log(x**y).expand() != y*log(x)
assert log(x**y).expand(force=True) == y*log(x)
assert log(x, 2) == log(x)/log(2)
assert log(E, 2) == 1/log(2)
p, q = symbols('p,q', positive=True)
r = Symbol('r', real=True)
assert log(p**2) != 2*log(p)
assert log(p**2).expand() == 2*log(p)
assert log(x**2).expand() != 2*log(x)
assert log(p**q) != q*log(p)
assert log(exp(p)) == p
assert log(p*q) != log(p) + log(q)
assert log(p*q).expand() == log(p) + log(q)
assert log(-sqrt(3)) == log(sqrt(3)) + I*pi
assert log(-exp(p)) != p + I*pi
assert log(-exp(x)).expand() != x + I*pi
assert log(-exp(r)).expand() == r + I*pi
assert log(x**y) != y*log(x)
assert (log(x**-5)**-1).expand() != -1/log(x)/5
assert (log(p**-5)**-1).expand() == -1/log(p)/5
assert log(-x).func is log and log(-x).args[0] == -x
assert log(-p).func is log and log(-p).args[0] == -p
def test_exp_assumptions():
r = Symbol('r', real=True)
i = Symbol('i', imaginary=True)
for e in exp, exp_polar:
assert e(x).is_real is None
assert e(x).is_imaginary is None
assert e(i).is_real is None
assert e(i).is_imaginary is None
assert e(r).is_real is True
assert e(r).is_imaginary is False
assert e(re(x)).is_extended_real is True
assert e(re(x)).is_imaginary is False
assert exp(0, evaluate=False).is_algebraic
a = Symbol('a', algebraic=True)
an = Symbol('an', algebraic=True, nonzero=True)
r = Symbol('r', rational=True)
rn = Symbol('rn', rational=True, nonzero=True)
assert exp(a).is_algebraic is None
assert exp(an).is_algebraic is False
assert exp(pi*r).is_algebraic is None
assert exp(pi*rn).is_algebraic is False
def test_exp_AccumBounds():
assert exp(AccumBounds(1, 2)) == AccumBounds(E, E**2)
def test_log_assumptions():
p = symbols('p', positive=True)
n = symbols('n', negative=True)
z = symbols('z', zero=True)
x = symbols('x', infinite=True, extended_positive=True)
assert log(z).is_positive is False
assert log(x).is_extended_positive is True
assert log(2) > 0
assert log(1, evaluate=False).is_zero
assert log(1 + z).is_zero
assert log(p).is_zero is None
assert log(n).is_zero is False
assert log(0.5).is_negative is True
assert log(exp(p) + 1).is_positive
assert log(1, evaluate=False).is_algebraic
assert log(42, evaluate=False).is_algebraic is False
assert log(1 + z).is_rational
def test_log_hashing():
assert x != log(log(x))
assert hash(x) != hash(log(log(x)))
assert log(x) != log(log(log(x)))
e = 1/log(log(x) + log(log(x)))
assert e.base.func is log
e = 1/log(log(x) + log(log(log(x))))
assert e.base.func is log
e = log(log(x))
assert e.func is log
    assert x.func is not log
assert hash(log(log(x))) != hash(x)
assert e != x
def test_log_sign():
assert sign(log(2)) == 1
def test_log_expand_complex():
assert log(1 + I).expand(complex=True) == log(2)/2 + I*pi/4
assert log(1 - sqrt(2)).expand(complex=True) == log(sqrt(2) - 1) + I*pi
def test_log_apply_evalf():
value = (log(3)/log(2) - 1).evalf()
assert value.epsilon_eq(Float("0.58496250072115618145373"))
def test_log_expand():
w = Symbol("w", positive=True)
e = log(w**(log(5)/log(3)))
assert e.expand() == log(5)/log(3) * log(w)
x, y, z = symbols('x,y,z', positive=True)
assert log(x*(y + z)).expand(mul=False) == log(x) + log(y + z)
assert log(log(x**2)*log(y*z)).expand() in [log(2*log(x)*log(y) +
2*log(x)*log(z)), log(log(x)*log(z) + log(y)*log(x)) + log(2),
log((log(y) + log(z))*log(x)) + log(2)]
assert log(x**log(x**2)).expand(deep=False) == log(x)*log(x**2)
assert log(x**log(x**2)).expand() == 2*log(x)**2
    assert (log(x*(y + z))*(y + z)).expand(mul=True, log=True) == y*log(
        x) + y*log(y + z) + z*log(x) + z*log(y + z)
x, y = symbols('x,y')
assert log(x*y).expand(force=True) == log(x) + log(y)
assert log(x**y).expand(force=True) == y*log(x)
assert log(exp(x)).expand(force=True) == x
# there's generally no need to expand out logs since this requires
# factoring and if simplification is sought, it's cheaper to put
# logs together than it is to take them apart.
assert log(2*3**2).expand() != 2*log(3) + log(2)
def test_log_simplify():
x = Symbol("x", positive=True)
assert log(x**2).expand() == 2*log(x)
assert expand_log(log(x**(2 + log(2)))) == (2 + log(2))*log(x)
z = Symbol('z')
assert log(sqrt(z)).expand() == log(z)/2
assert expand_log(log(z**(log(2) - 1))) == (log(2) - 1)*log(z)
assert log(z**(-1)).expand() != -log(z)
assert log(z**(x/(x+1))).expand() == x*log(z)/(x + 1)
def test_log_AccumBounds():
assert log(AccumBounds(1, E)) == AccumBounds(0, 1)
def test_lambertw():
k = Symbol('k')
assert LambertW(x, 0) == LambertW(x)
assert LambertW(x, 0, evaluate=False) != LambertW(x)
assert LambertW(0) == 0
assert LambertW(E) == 1
assert LambertW(-1/E) == -1
assert LambertW(-log(2)/2) == -log(2)
assert LambertW(oo) == oo
assert LambertW(0, 1) == -oo
assert LambertW(0, 42) == -oo
assert LambertW(-pi/2, -1) == -I*pi/2
assert LambertW(-1/E, -1) == -1
assert LambertW(-2*exp(-2), -1) == -2
assert LambertW(x**2).diff(x) == 2*LambertW(x**2)/x/(1 + LambertW(x**2))
assert LambertW(x, k).diff(x) == LambertW(x, k)/x/(1 + LambertW(x, k))
assert LambertW(sqrt(2)).evalf(30).epsilon_eq(
Float("0.701338383413663009202120278965", 30), 1e-29)
assert re(LambertW(2, -1)).evalf().epsilon_eq(Float("-0.834310366631110"))
assert LambertW(-1).is_real is False # issue 5215
assert LambertW(2, evaluate=False).is_real
p = Symbol('p', positive=True)
assert LambertW(p, evaluate=False).is_real
assert LambertW(p - 1, evaluate=False).is_real is None
assert LambertW(-p - 2/S.Exp1, evaluate=False).is_real is False
assert LambertW(S.Half, -1, evaluate=False).is_real is False
assert LambertW(-S.One/10, -1, evaluate=False).is_real
assert LambertW(-10, -1, evaluate=False).is_real is False
assert LambertW(-2, 2, evaluate=False).is_real is False
assert LambertW(0, evaluate=False).is_algebraic
na = Symbol('na', nonzero=True, algebraic=True)
assert LambertW(na).is_algebraic is False
def test_issue_5673():
e = LambertW(-1)
assert e.is_comparable is False
assert e.is_positive is not True
e2 = 1 - 1/(1 - exp(-1000))
    assert e2.is_positive is not True
e3 = -2 + exp(exp(LambertW(log(2)))*LambertW(log(2)))
assert e3.is_nonzero is not True
def test_log_fdiff():
x = Symbol('x')
raises(ArgumentIndexError, lambda: log(x).fdiff(2))
def test_log_taylor_term():
x = symbols('x')
assert log(x).taylor_term(0, x) == x
assert log(x).taylor_term(1, x) == -x**2/2
assert log(x).taylor_term(4, x) == x**5/5
assert log(x).taylor_term(-1, x) == S.Zero
def test_exp_expand_NC():
A, B, C = symbols('A,B,C', commutative=False)
assert exp(A + B).expand() == exp(A + B)
assert exp(A + B + C).expand() == exp(A + B + C)
assert exp(x + y).expand() == exp(x)*exp(y)
assert exp(x + y + z).expand() == exp(x)*exp(y)*exp(z)
def test_as_numer_denom():
n = symbols('n', negative=True)
assert exp(x).as_numer_denom() == (exp(x), 1)
assert exp(-x).as_numer_denom() == (1, exp(x))
assert exp(-2*x).as_numer_denom() == (1, exp(2*x))
assert exp(-2).as_numer_denom() == (1, exp(2))
assert exp(n).as_numer_denom() == (1, exp(-n))
assert exp(-n).as_numer_denom() == (exp(-n), 1)
assert exp(-I*x).as_numer_denom() == (1, exp(I*x))
assert exp(-I*n).as_numer_denom() == (1, exp(I*n))
assert exp(-n).as_numer_denom() == (exp(-n), 1)
def test_polar():
x, y = symbols('x y', polar=True)
assert abs(exp_polar(I*4)) == 1
assert abs(exp_polar(0)) == 1
assert abs(exp_polar(2 + 3*I)) == exp(2)
assert exp_polar(I*10).n() == exp_polar(I*10)
assert log(exp_polar(z)) == z
assert log(x*y).expand() == log(x) + log(y)
assert log(x**z).expand() == z*log(x)
assert exp_polar(3).exp == 3
# Compare exp(1.0*pi*I).
assert (exp_polar(1.0*pi*I).n(n=5)).as_real_imag()[1] >= 0
assert exp_polar(0).is_rational is True # issue 8008
def test_log_product():
from sympy.abc import n, m
i, j = symbols('i,j', positive=True, integer=True)
x, y = symbols('x,y', positive=True)
from sympy.concrete import Product, Sum
f, g = Function('f'), Function('g')
assert simplify(log(Product(x**i, (i, 1, n)))) == log(Product(x**i, (i, 1, n)))
assert simplify(log(Product(x**i*y**j, (i, 1, n), (j, 1, m)))) == \
log(Product(x**i*y**j, (i, 1, n), (j, 1, m)))
expr = log(Product(-2, (n, 0, 4)))
assert simplify(expr) == expr
def test_issue_8866():
assert simplify(log(x, 10, evaluate=False)) == simplify(log(x, 10))
assert expand_log(log(x, 10, evaluate=False)) == expand_log(log(x, 10))
y = Symbol('y', positive=True)
l1 = log(exp(y), exp(10))
b1 = log(exp(y), exp(5))
l2 = log(exp(y), exp(10), evaluate=False)
b2 = log(exp(y), exp(5), evaluate=False)
assert simplify(log(l1, b1)) == simplify(log(l2, b2))
assert expand_log(log(l1, b1)) == expand_log(log(l2, b2))
def test_issue_9116():
n = Symbol('n', positive=True, integer=True)
assert ln(n).is_nonnegative is True
assert log(n).is_nonnegative is True
=== stats: avg_line_length 31.724272 | max_line_length 96 | alphanum_fraction 0.579753
=== file: tests/test_grammar.py
=== repo: rium9/mlangpy @ 75821306b15d72278220d2a1a403daa36f60cc4a | license: MIT
=== hexsha: 4a1fcaf2f4742b4421b03272125e622a9e9c5fe6 | size: 8,128 | ext: py | lang: Python
=== stars: 1 (2020-04-20T20:23:31.000Z to 2020-04-20T20:23:31.000Z) | issues: null | forks: null
from unittest import TestCase
from mlangpy.grammar import *
class TestSymbol(TestCase):
def setUp(self):
class TSym(Symbol):
pass
class NSym(Symbol):
def __init__(self, subject):
super().__init__(subject, left_bound='/', right_bound='/')
self.TSym = TSym
self.NSym = NSym
def test__eq__1(self):
s = Symbol('a')
self.assertEqual(s, s)
def test__eq__2(self):
s = Symbol('a')
t = Symbol('b')
self.assertNotEqual(s, t)
def test__eq__3(self):
s = Symbol('a', left_bound='/', right_bound='\\')
t = Symbol('a', left_bound='#', right_bound='#')
self.assertEqual(s, t)
def test__eq__4(self):
s = Symbol('a', left_bound='/', right_bound='\\')
t = Symbol('a', left_bound='#', right_bound='#')
self.assertEqual(t, s)
def test__eq__5(self):
# Subclasses can be equal to their supers, but not to their siblings.
s = Symbol('a')
t = self.TSym('a')
self.assertEqual(s, t)
def test__eq__6(self):
s = Symbol('a')
t = self.TSym('a')
self.assertEqual(t, s)
def test__eq__7(self):
t = self.TSym('a')
n = self.NSym('b')
self.assertNotEqual(t, n)
class TestSequences(TestCase):
def test__eq__1(self):
f = Sequence(['a', 'b'])
g = Sequence(['a', 'b'])
self.assertEqual(f, g)
def test__eq__2(self):
# Subclasses can be deemed equal, as long as terms are equal
class GSeq(Sequence):
def __init__(self, terms):
super().__init__(terms, separator='/')
f = Sequence(['a', 'b'])
g = GSeq(['a', 'b'])
self.assertEqual(f, g)
def test__eq__3(self):
f = Sequence(['a', 'b'])
g = Sequence(['a', 'c'])
self.assertNotEqual(f, g)
def test__eq__4(self):
f = Sequence([])
g = Sequence([])
self.assertEqual(f, g)
def test__eq__5(self):
class GSeq(Sequence):
def __init__(self, terms):
super().__init__(terms, separator='/')
f = Sequence(['a', 'b'])
g = GSeq(['a', 'b'])
self.assertEqual(g, f)
def test_addition1(self):
f = Sequence(['a', 'b'])
b = 'c'
self.assertEqual((f + b)[2], 'c')
def test_addition2(self):
f = Sequence(['a', 'b'])
b = 'c'
self.assertEqual((b + f).terms[0], 'c')
def test_addition3(self):
f = Sequence([])
b = 'c'
self.assertEqual((f + b).terms[0], 'c')
def test_addition4(self):
f = Sequence([])
g = Sequence([])
assert issubclass((f + g).__class__, Sequence)
self.assertEqual((f + g).terms, [])
def test_addition5(self):
f = Sequence([])
g = Sequence(['a'])
assert issubclass((f + g).__class__, Sequence)
self.assertEqual((f + g).terms[0], 'a')
def test_addition6(self):
f = Sequence(['a', 'b'])
g = Sequence(['c', 'd'])
assert issubclass((f + g).__class__, Sequence)
assert (f + g).terms[0] == 'a'
assert (f + g).terms[1] == 'b'
assert (f + g).terms[2] == 'c'
assert (f + g).terms[3] == 'd'
def test_addition7(self):
f = Sequence(['a', 'b'])
g = Sequence([])
assert issubclass((f + g).__class__, Sequence)
assert (f + g).terms[0] == 'a'
assert (f + g).terms[1] == 'b'
assert len((f + g).terms) == 2
class TestDefinitionList(TestCase):
def test_addition1(self):
pass
class TestRuleset(TestCase):
def setUp(self):
# Example rules: Ruleset equality checks are recursive so just assume Rules are equal
self.r1 = Rule(
NonTerminal('a'),
[Concat([Terminal('b')])]
)
self.r2 = self.r1
self.ruleset = Ruleset([self.r1, self.r2])
def test__eq__1(self):
self.assertEqual(Ruleset([]), Ruleset([]))
def test__eq__2(self):
self.assertEqual(self.ruleset, self.ruleset)
def test__eq__3(self):
new_rule = Ruleset([self.r1, self.r2])
self.assertEqual(self.ruleset, new_rule)
def test__eq__4(self):
new_rule = Ruleset([self.r1, self.r2])
self.assertEqual(self.ruleset, new_rule)
# Test adding Rules to a Ruleset
# Addition for Rulesets will only work with Rule and Ruleset (subclasses)
def test_addition1(self):
with self.assertRaises(TypeError):
self.ruleset + 1
def test_addition2(self):
with self.assertRaises(TypeError):
object + self.ruleset
def test_addition3(self):
r = Rule(['a'], [Concat(['b'])])
new_ruleset = Ruleset([]) + r
assert len(new_ruleset) == 1
        self.assertTrue(isinstance(new_ruleset.rules[0], Rule))
def test_addition4(self):
r = Rule(['a'], [Concat(['b'])])
new_ruleset = self.ruleset + r
assert len(new_ruleset) == 3
self.assertTrue(isinstance(new_ruleset.rules[2], Rule))
def test_addition5(self):
r = Rule(['a'], [Concat(['b'])])
new_ruleset = r + self.ruleset
assert len(new_ruleset) == 3
self.assertTrue(isinstance(new_ruleset.rules[0], Rule))
# Test adding Rulesets together
def test_addition6(self):
b = Ruleset(self.ruleset.rules + self.ruleset.rules)
self.assertEqual(self.ruleset + self.ruleset, b)
def test_addition7(self):
r = Rule(NonTerminal('a'), [])
n = Ruleset([r])
q = self.ruleset + n
self.assertTrue(len(q.rules) == 3 and q.rules[2] == r)
def test_addition8(self):
r = Rule(NonTerminal('a'), [])
n = Ruleset([r])
q = n + self.ruleset
        self.assertTrue(len(q.rules) == 3 and q.rules[0] == r)
# Test __iadd__ for Rules
def test_iaddition1(self):
r = Rule([NonTerminal('a')], [Concat(['b'])])
new_ruleset = copy.deepcopy(self.ruleset)
new_ruleset += r
assert isinstance(new_ruleset, Ruleset)
assert len(new_ruleset.rules) == 3
def test_iaddition2(self):
new_ruleset = Ruleset([])
new_ruleset += Rule([NonTerminal('a')], [Concat(['b'])])
assert isinstance(new_ruleset, Ruleset)
assert len(new_ruleset.rules) == 1
# Test __iadd__ for Rulesets
def test_iaddition3(self):
clone = copy.deepcopy(self.ruleset)
clone += Ruleset([])
assert isinstance(clone, Ruleset)
assert len(clone.rules) == 2
def test_iaddition4(self):
r = Rule(NonTerminal('a'), [])
clone = copy.deepcopy(self.ruleset)
clone += Ruleset([r])
assert isinstance(clone, Ruleset)
assert len(clone.rules) == 3
self.assertEqual(clone.rules[2], r)
def test_iaddition5(self):
with self.assertRaises(TypeError):
self.ruleset += 'hi'
def test_iaddition6(self):
with self.assertRaises(TypeError):
self.ruleset += object()
def test_find_rules_for1(self):
looking_for = self.ruleset[0].right
matches = self.ruleset.find_rules_for(looking_for)
r = Rule(
NonTerminal('a'),
[Concat([Terminal('b')])]
)
self.assertIn(r, matches)
def test_find_rules_for2(self):
r = Rule(
NonTerminal('a'),
[Concat([Terminal('b')])]
)
looking_for = DefList([])
matches = self.ruleset.find_rules_for(looking_for)
self.assertEqual(len(matches), 0)
def test_find_rules1(self):
looking_for = Rule(
NonTerminal('a'),
[Concat([Terminal('b')])]
)
matches = self.ruleset.find_rules(looking_for)
self.assertIn(looking_for, matches)
def test_find_rules2(self):
looking_for = Rule(
NonTerminal('c'),
[Concat([Terminal('b')])]
)
matches = self.ruleset.find_rules(looking_for)
        self.assertNotIn(looking_for, matches)

=== stats: avg_line_length 28.027586 | max_line_length 93 | alphanum_fraction 0.551796
=== file: contrib_lib/settings.py
=== repo: DHI/ifm_contrib @ 443c3a86960990115887855a2f4adac07797fc35 | license: MIT
=== hexsha: 4a1fcb0a26d4e858532b033f5f5cd22ff55f149f | size: 307 | ext: py | lang: Python
=== stars: 9 (2018-09-28T12:01:24.000Z to 2021-10-07T15:17:51.000Z) | issues: 11 (2019-10-23T13:25:07.000Z to 2022-03-21T21:11:42.000Z) | forks: 7 (2019-04-23T11:01:51.000Z to 2021-08-24T13:33:06.000Z)
from ifm import Enum
from .settings_pandas import SettingsPd
class Settings:
"""
Functions for reading and writing global settings
"""
def __init__(self, doc):
self.doc = doc
# add custom child-classes here
self.df = SettingsPd(doc)
# add custom methods here
=== stats: avg_line_length 19.1875 | max_line_length 53 | alphanum_fraction 0.651466
=== file: run_video.py
=== repo: macropusgiganteus/neph_pose_estimation @ aecb8a932248181ade1c7e5b2f3b2d668def327a | license: Apache-2.0
=== hexsha: 4a1fcbab1c56d6fd714abd8de8dea57f24de3de2 | size: 2,069 | ext: py | lang: Python | stars: null | issues: null | forks: null
import argparse
import logging
import time
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
logger = logging.getLogger('TfPoseEstimator-Video')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
fps_time = 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tf-pose-estimation Video')
parser.add_argument('--video', type=str, default='')
parser.add_argument('--resolution', type=str, default='432x368', help='network input resolution. default=432x368')
parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
parser.add_argument('--show-process', type=bool, default=False,
help='for debug purpose, if enabled, speed for inference is dropped.')
parser.add_argument('--showBG', type=bool, default=True, help='False to show skeleton only.')
args = parser.parse_args()
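    # Example invocation (hypothetical video file name):
    #   python run_video.py --video demo.mp4 --model mobilenet_thin --resolution 432x368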
logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
w, h = model_wh(args.resolution)
e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
cap = cv2.VideoCapture(args.video)
if cap.isOpened() is False:
print("Error opening video stream or file")
while cap.isOpened():
        ret_val, image = cap.read()
        if not ret_val:
            # guard against end of stream, where read() returns (False, None)
            break
        humans = e.inference(image)
if not args.showBG:
image = np.zeros(image.shape)
image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.imshow('tf-pose-estimation result', image)
fps_time = time.time()
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
logger.debug('finished+')
=== stats: avg_line_length 36.298246 | max_line_length 141 | alphanum_fraction 0.685355
=== file: doc/source/conf.py
=== repo: munnaeebd/openstack-helm @ 0a1d6aeb94937a4be49b29b5387c4ad7791eecfd | license: Apache-2.0
=== hexsha: 4a1fcd41466300627710edd4bcd5a6aa297521ae | size: 3,296 | ext: py | lang: Python | stars: null | issues: null | forks: null
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'openstackdocstheme',
'sphinxcontrib.blockdiag'
]
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/openstack-helm'
openstackdocs_auto_name = False
openstackdocs_use_storyboard = True
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'openstack-helm'
copyright = u'2016, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# html_last_updated_fmt = '%Y-%m-%d %H:%M'
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
| 34.333333 | 79 | 0.718447 |
4a1fcfc7645d735e9fa6ae12e244df026f025358 | 1,950 | py | Python | tools/cli_usage.py | mtag-dev/esg | f01e8caef23abb54ebc0db5279cbee3006dffa05 | [
"BSD-3-Clause"
] | 3 | 2022-01-11T19:04:37.000Z | 2022-01-12T16:03:10.000Z | tools/cli_usage.py | mtag-dev/esg | f01e8caef23abb54ebc0db5279cbee3006dffa05 | [
"BSD-3-Clause"
] | 4 | 2022-01-07T11:46:36.000Z | 2022-01-31T00:23:41.000Z | tools/cli_usage.py | mtag-dev/esg | f01e8caef23abb54ebc0db5279cbee3006dffa05 | [
"BSD-3-Clause"
] | 1 | 2022-01-11T19:07:17.000Z | 2022-01-11T19:07:17.000Z | """
Look for a marker comment in docs pages, and place the output of
`$ esg --help` there. Pass `--check` to ensure the content is in sync.
"""
import argparse
import subprocess
import sys
import typing
from pathlib import Path
def _get_usage_lines() -> typing.List[str]:
res = subprocess.run(["esg", "--help"], stdout=subprocess.PIPE)
help_text = res.stdout.decode("utf-8")
return ["```", "$ esg --help", *help_text.splitlines(), "```"]
def _find_next_codefence_lineno(lines: typing.List[str], after: int) -> int:
return next(
lineno for lineno, line in enumerate(lines[after:], after) if line == "```"
)
def _get_insert_location(lines: typing.List[str]) -> typing.Tuple[int, int]:
marker = lines.index("<!-- :cli_usage: -->")
start = marker + 1
if lines[start] == "```":
# Already generated.
# <!-- :cli_usage: -->
# ``` <- start
# [...]
# ``` <- end
next_codefence = _find_next_codefence_lineno(lines, after=start + 1)
end = next_codefence + 1
else:
# Not generated yet.
end = start
return start, end
def _generate_cli_usage(path: Path, check: bool = False) -> int:
content = path.read_text()
lines = content.splitlines()
usage_lines = _get_usage_lines()
start, end = _get_insert_location(lines)
lines = lines[:start] + usage_lines + lines[end:]
output = "\n".join(lines) + "\n"
if check:
if content == output:
return 0
print(f"ERROR: CLI usage in {path} is out of sync. Run `make format` to fix.")
return 1
path.write_text(output)
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--check", action="store_true")
args = parser.parse_args()
paths = [Path("README.md"),]
rv = 0
for path in paths:
rv |= _generate_cli_usage(path, check=args.check)
sys.exit(rv)
| 27.464789 | 86 | 0.607692 |
4a1fcff2e3f524fbe92cf616797fb7b3c219dda7 | 352 | py | Python | docs/examples/sentinel.py | laggardkernel/aioredis-py | 9d71774d7fa5864160f0f3824feda154b2cc0e0e | [
"MIT"
] | 1,777 | 2015-01-25T17:26:56.000Z | 2021-03-18T08:39:55.000Z | docs/examples/sentinel.py | laggardkernel/aioredis-py | 9d71774d7fa5864160f0f3824feda154b2cc0e0e | [
"MIT"
] | 754 | 2015-02-22T13:36:08.000Z | 2021-03-18T23:11:21.000Z | docs/examples/sentinel.py | seedofjoy/aioredis | 2f217e114a395b73d77952172eab4abeaa0dd2ff | [
"MIT"
] | 314 | 2015-02-24T20:18:46.000Z | 2021-03-13T01:47:06.000Z | import asyncio
import aioredis.sentinel
async def main():
sentinel_client = aioredis.sentinel.Sentinel([("localhost", 26379)])
master_redis: aioredis.Redis = sentinel_client.master_for("mymaster")
info = await master_redis.sentinel_master("mymaster")
print("Master role:", info)
if __name__ == "__main__":
asyncio.run(main())
| 22 | 73 | 0.71875 |
4a1fcffc310ca5b692fa89d8b5ae81bbbfb80447 | 3,447 | py | Python | certbot-dns-dnsmadeeasy/certbot_dns_dnsmadeeasy/_internal/dns_dnsmadeeasy.py | silverbacknet/certbot | 270b5535e24fd3dab4c05fa8929adca8117942f1 | [
"Apache-2.0"
] | 5 | 2021-01-26T08:47:29.000Z | 2021-01-30T00:42:12.000Z | certbot-dns-dnsmadeeasy/certbot_dns_dnsmadeeasy/_internal/dns_dnsmadeeasy.py | silverbacknet/certbot | 270b5535e24fd3dab4c05fa8929adca8117942f1 | [
"Apache-2.0"
] | 2 | 2019-11-20T07:08:26.000Z | 2020-11-05T23:31:48.000Z | certbot-dns-dnsmadeeasy/certbot_dns_dnsmadeeasy/_internal/dns_dnsmadeeasy.py | silverbacknet/certbot | 270b5535e24fd3dab4c05fa8929adca8117942f1 | [
"Apache-2.0"
] | 1 | 2021-09-24T22:22:11.000Z | 2021-09-24T22:22:11.000Z | """DNS Authenticator for DNS Made Easy DNS."""
import logging
from lexicon.providers import dnsmadeeasy
import zope.interface
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
from certbot.plugins import dns_common_lexicon
logger = logging.getLogger(__name__)
ACCOUNT_URL = 'https://cp.dnsmadeeasy.com/account/info'
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
"""DNS Authenticator for DNS Made Easy
This Authenticator uses the DNS Made Easy API to fulfill a dns-01 challenge.
"""
description = ('Obtain certificates using a DNS TXT record (if you are using DNS Made Easy for '
'DNS).')
ttl = 60
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.credentials = None
@classmethod
def add_parser_arguments(cls, add): # pylint: disable=arguments-differ
super(Authenticator, cls).add_parser_arguments(add, default_propagation_seconds=60)
add('credentials', help='DNS Made Easy credentials INI file.')
def more_info(self): # pylint: disable=missing-function-docstring
return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \
'the DNS Made Easy API.'
def _setup_credentials(self):
self.credentials = self._configure_credentials(
'credentials',
'DNS Made Easy credentials INI file',
{
'api-key': 'API key for DNS Made Easy account, obtained from {0}'
.format(ACCOUNT_URL),
'secret-key': 'Secret key for DNS Made Easy account, obtained from {0}'
.format(ACCOUNT_URL)
}
)
def _perform(self, domain, validation_name, validation):
self._get_dnsmadeeasy_client().add_txt_record(domain, validation_name, validation)
def _cleanup(self, domain, validation_name, validation):
self._get_dnsmadeeasy_client().del_txt_record(domain, validation_name, validation)
def _get_dnsmadeeasy_client(self):
return _DNSMadeEasyLexiconClient(self.credentials.conf('api-key'),
self.credentials.conf('secret-key'),
self.ttl)
class _DNSMadeEasyLexiconClient(dns_common_lexicon.LexiconClient):
"""
Encapsulates all communication with the DNS Made Easy via Lexicon.
"""
def __init__(self, api_key, secret_key, ttl):
super(_DNSMadeEasyLexiconClient, self).__init__()
config = dns_common_lexicon.build_lexicon_config('dnsmadeeasy', {
'ttl': ttl,
}, {
'auth_username': api_key,
'auth_token': secret_key,
})
self.provider = dnsmadeeasy.Provider(config)
def _handle_http_error(self, e, domain_name):
if domain_name in str(e) and str(e).startswith('404 Client Error: Not Found for url:'):
return None
hint = None
if str(e).startswith('403 Client Error: Forbidden for url:'):
hint = 'Are your API key and Secret key values correct?'
return errors.PluginError('Error determining zone identifier: {0}.{1}'
.format(e, ' ({0})'.format(hint) if hint else ''))
| 37.064516 | 100 | 0.64984 |
4a1fd0270d01f795724e8ea0f47bd48c033d8777 | 9,801 | py | Python | models/base_model.py | matansudry/DL_hw2 | 20f776cb6e41768e167d9d21af9afbd50cab60e6 | [
"MIT"
] | null | null | null | models/base_model.py | matansudry/DL_hw2 | 20f776cb6e41768e167d9d21af9afbd50cab60e6 | [
"MIT"
] | null | null | null | models/base_model.py | matansudry/DL_hw2 | 20f776cb6e41768e167d9d21af9afbd50cab60e6 | [
"MIT"
] | null | null | null | from abc import ABCMeta
import torch
from torch import nn, Tensor
import itertools as it
from torch.nn.utils.rnn import pack_padded_sequence
import torch.nn.init as init
import torch.nn.functional as F
class ResidualBlock(nn.Module):
"""
"""
def __init__(
self,
in_channels: int,
channels: list,
kernel_sizes: list,
batchnorm=False,
dropout=0.0,
activation_type: str = "relu",
activation_params: dict = {},
**kwargs,
):
super().__init__()
assert channels and kernel_sizes
assert len(channels) == len(kernel_sizes)
assert all(map(lambda x: x % 2 == 1, kernel_sizes))
self.main_path, self.shortcut_path = None, None
main_layers = []
# - extract number of conv layers
N = len(channels)
# - first conv layer
main_layers.append(
nn.Conv2d(
in_channels,
channels[0],
kernel_size= kernel_sizes[0],
padding=(int((kernel_sizes[0]-1)/2),
int((kernel_sizes[0]-1)/2)), bias=True))
if dropout !=0:
main_layers.append(torch.nn.Dropout2d(p=dropout))
if batchnorm == True:
main_layers.append(torch.nn.BatchNorm2d(channels[0], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True))
main_layers.append(nn.ReLU(inplace=True))
#middle layers
for i in range(1,N-1):
main_layers.append(
nn.Conv2d(
channels[i-1],
channels[i],
kernel_size= kernel_sizes[i],
padding=(int((kernel_sizes[i]-1)/2),
int((kernel_sizes[i]-1)/2)), bias=True))
if dropout !=0:
main_layers.append(torch.nn.Dropout2d(p=dropout))
if batchnorm == True:
main_layers.append(torch.nn.BatchNorm2d(channels[i], eps=1e-05, momentum=0.1, affine=True, track_running_stats=True))
if (i%2 == 1):
main_layers.append(nn.ReLU(inplace=True))
if N > 1:
main_layers.append(
nn.Conv2d(
channels[N-2],
channels[N-1],
kernel_size= kernel_sizes[N-1],
padding=(int((kernel_sizes[N-1]-1)/2),
int((kernel_sizes[N-1]-1)/2)), bias=True))
self.main_path = nn.Sequential(*main_layers)
def forward(self, x):
out = self.main_path(x)
relu = torch.nn.ReLU()
out = relu(out)
return out
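# Shape sketch for the block above (values are illustrative): odd kernel
# sizes with symmetric padding and stride 1 preserve the spatial size.
#   block = ResidualBlock(in_channels=64, channels=[64, 64], kernel_sizes=[3, 3])
#   y = block(torch.randn(8, 64, 32, 32))  # -> torch.Size([8, 64, 32, 32])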
class ResNetClassifier(nn.Module):
def __init__(
self,
in_size,
channels,
img_encoder_out_classes,
pool_every,
activation_type: str = "relu",
activation_params: dict = {},
pooling_type: str = "max",
pooling_params: dict = {},
batchnorm=False,
dropout=0.0,
**kwargs,
):
"""
See arguments of ConvClassifier & ResidualBlock.
"""
super().__init__()
self.batchnorm = batchnorm
self.dropout = dropout
self.conv_params=dict(kernel_size=3, stride=1, padding=1)
self.in_size = in_size
self.channels = channels
self.img_encoder_out_classes= img_encoder_out_classes
self.pool_every = pool_every
self.activation_type = activation_type
self.activation_params = activation_params
self.pooling_type = pooling_type
self.pooling_params = pooling_params
self.feature_extractor = self._make_feature_extractor()
self.fc1 = nn.Linear(512, 1000, bias=True)
self.fc2 = nn.Linear(1000, self.img_encoder_out_classes, bias=True)
self.relu = nn.ReLU(inplace=True)
def _make_feature_extractor(self):
in_channels, in_h, in_w, = tuple(self.in_size)
layers = []
# - extract number of conv layers
N = len(self.channels)
#1st layer
temp_in_channels = 64
temp_channels = []
temp_kernel_sizes = []
layers.append(nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False))
layers.append(torch.nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False))
#middle layers
for i in range(1,N):
temp_channels.append(self.channels[i-1])
temp_kernel_sizes.append(3)
if ((i % self.pool_every)==0 and i!=0):
layers.append(
ResidualBlock(
in_channels=temp_in_channels,
channels=temp_channels,
kernel_sizes=temp_kernel_sizes,
batchnorm= self.batchnorm,
dropout= self.dropout,
activation_type= self.activation_type))
temp_in_channels = self.channels[i-1]
temp_channels = []
temp_kernel_sizes = []
temp_channels.append(self.channels[N-1])
temp_kernel_sizes.append(3)
layers.append(ResidualBlock(
in_channels=temp_in_channels,
channels=temp_channels,
kernel_sizes=temp_kernel_sizes,
batchnorm= self.batchnorm,
dropout= self.dropout,
activation_type= self.activation_type))
if ((N % self.pool_every)==0):
layers.append(torch.nn.AdaptiveAvgPool2d(output_size=(1, 1)))
seq = nn.Sequential(*layers)
return seq
def forward(self, x):
out = self.feature_extractor(x)
batch_size = out.shape[0]
out = out.view(batch_size, -1)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
return out
class TextProcessor(nn.Module):
def __init__(self, embedding_tokens, embedding_features, lstm_features, drop=0.0):
super(TextProcessor, self).__init__()
self.embedding = nn.Embedding(embedding_tokens, embedding_features, padding_idx=0)
self.drop = nn.Dropout(drop)
self.tanh = nn.Tanh()
self.lstm = nn.LSTM(input_size=embedding_features,
hidden_size=1024,
num_layers=2)
self._init_lstm(self.lstm.weight_ih_l0)
self._init_lstm(self.lstm.weight_hh_l0)
self.lstm.bias_ih_l0.data.zero_()
self.lstm.bias_hh_l0.data.zero_()
self.fc = nn.Linear(1024, lstm_features, bias=True)
        init.xavier_uniform_(self.embedding.weight)
def _init_lstm(self, weight):
for w in weight.chunk(4, 0):
            init.xavier_uniform_(w)
def forward(self, ques):
embedded = self.embedding(ques)
packed = self.tanh(self.drop(embedded))
        packed = packed.transpose(0, 1)
_, (h, c) = self.lstm(packed)
packed = torch.cat((h, c), 2)
packed = packed.transpose(0, 1)
packed = packed.reshape(packed.size()[0], -1)
return packed
class MyModel(nn.Module, metaclass=ABCMeta):
def __init__(
self, image_in_size=((3,224,224)), img_encoder_out_classes=1024, img_encoder_channels=[32, 128, 512, 1024],
img_encoder_batchnorm=True, img_encoder_dropout=0.5, text_embedding_tokens=15193, text_embedding_features=100,
text_lstm_features=512, text_dropout=0.5, classifier_dropout=0.5, classifier_mid_features=128,classifier_out_classes=2410
):
super(MyModel, self).__init__()
self.image_in_size=image_in_size
self.img_encoder_out_classes=img_encoder_out_classes
self.img_encoder_channels=img_encoder_channels
self.img_encoder_batchnorm=img_encoder_batchnorm
self.img_encoder_dropout=img_encoder_dropout
self.text_embedding_tokens=text_embedding_tokens
self.text_embedding_features=text_embedding_features
self.text_lstm_features=text_lstm_features
self.text_dropout=text_dropout
self.classifier_dropout=classifier_dropout
self.classifier_mid_features=classifier_mid_features
self.classifier_out_classes=classifier_out_classes
self.dropout = nn.Dropout(self.classifier_dropout)
self.fc1 = nn.Linear(self.img_encoder_out_classes, self.classifier_mid_features)
self.fc2 = nn.Linear(self.classifier_mid_features, self.classifier_out_classes)
self.relu = nn.ReLU(inplace=True)
self.img_encoder = ResNetClassifier(
in_size=image_in_size,
channels=img_encoder_channels,
img_encoder_out_classes=self.img_encoder_out_classes,
pool_every=1,
activation_type='relu',
activation_params=dict(),
pooling_type='avg',
pooling_params=dict(kernel_size=3),
batchnorm=img_encoder_batchnorm,
dropout=img_encoder_dropout,
)
self.text = TextProcessor(
embedding_tokens=text_embedding_tokens,
embedding_features=text_embedding_features,
lstm_features=text_lstm_features,
drop=text_dropout,
)
def forward(self, x) -> Tensor:
img = x[0]
ques = x[1]
img = self.img_encoder(img)
ques = self.text(ques)
combined = torch.mul(ques, img)
combined = self.relu(combined)
combined = self.dropout(combined)
combined = self.fc1(combined)
combined = self.relu(combined)
combined = self.dropout(combined)
return self.fc2(combined) | 37.408397 | 133 | 0.59851 |
4a1fd0cdf84ad40719b58c62c0b99b82b790cb89 | 4,141 | py | Python | examples/classify/semi_supervised/img/scripts/create_split.py | parmarsuraj99/objax | 111cd78960f5812885505b5ec02552b98a789973 | [
"Apache-2.0"
] | 2 | 2021-02-23T18:23:40.000Z | 2022-03-09T09:38:37.000Z | examples/classify/semi_supervised/img/scripts/create_split.py | parmarsuraj99/objax | 111cd78960f5812885505b5ec02552b98a789973 | [
"Apache-2.0"
] | null | null | null | examples/classify/semi_supervised/img/scripts/create_split.py | parmarsuraj99/objax | 111cd78960f5812885505b5ec02552b98a789973 | [
"Apache-2.0"
] | 1 | 2020-09-20T23:56:29.000Z | 2020-09-20T23:56:29.000Z | #!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to create SSL splits from a dataset.
"""
import json
import os
from collections import defaultdict
import numpy as np
import tensorflow as tf
from absl import app, flags
from tqdm import trange, tqdm
from examples.classify.semi_supervised.img.libml.data import core
flags.DEFINE_integer('seed', 0, 'Random seed to use, 0 for no shuffling.')
flags.DEFINE_integer('size', 0, 'Size of labelled set.')
FLAGS = flags.FLAGS
def get_class(serialized_example):
return tf.io.parse_single_example(serialized_example,
features={'label': tf.io.FixedLenFeature([], tf.int64)})['label']
def main(argv):
assert FLAGS.size
argv.pop(0)
if any(not tf.io.gfile.exists(f) for f in argv[1:]):
raise FileNotFoundError(argv[1:])
target = '%s.%d@%d' % (argv[0], FLAGS.seed, FLAGS.size)
if tf.io.gfile.exists(target):
raise FileExistsError('For safety overwriting is not allowed', target)
input_files = argv[1:]
count = 0
id_class = []
class_id = defaultdict(list)
print('Computing class distribution')
dataset = tf.data.TFRecordDataset(input_files).map(get_class, 4).batch(1 << 10)
for it in dataset:
with tqdm(leave=False) as t:
for i in it:
id_class.append(i.numpy())
class_id[i.numpy()].append(count)
count += 1
t.update(it.shape[0])
print('%d records found' % count)
nclass = len(class_id)
assert min(class_id.keys()) == 0 and max(class_id.keys()) == (nclass - 1)
train_stats = np.array([len(class_id[i]) for i in range(nclass)], np.float64)
train_stats /= train_stats.max()
if 'stl10' in argv[1]:
# All of the unlabeled data is given label 0, but we know that
# STL has equally distributed data among the 10 classes.
train_stats[:] = 1
print(' Stats', ' '.join(['%.2f' % (100 * x) for x in train_stats]))
assert min(class_id.keys()) == 0 and max(class_id.keys()) == (nclass - 1)
class_id = [np.array(class_id[i], dtype=np.int64) for i in range(nclass)]
if FLAGS.seed:
np.random.seed(FLAGS.seed)
for i in range(nclass):
np.random.shuffle(class_id[i])
# Distribute labels to match the input distribution.
npos = np.zeros(nclass, np.int64)
label = []
for i in range(FLAGS.size):
c = np.argmax(train_stats - npos / max(npos.max(), 1))
label.append(class_id[c][npos[c]])
npos[c] += 1
del npos, class_id
label = frozenset([int(x) for x in label])
if 'stl10' in argv[1] and FLAGS.size == 1000:
data = tf.io.gfile.GFile(os.path.join(core.DATA_DIR, 'stl10_fold_indices.txt'), 'r').read()
label = frozenset(list(map(int, data.split('\n')[FLAGS.seed].split())))
print('Creating split in %s' % target)
tf.io.gfile.makedirs(os.path.dirname(target))
with tf.io.TFRecordWriter(target + '-label.tfrecord') as writer_label:
pos, loop = 0, trange(count, desc='Writing records')
for input_file in input_files:
for record in tf.compat.v1.python_io.tf_record_iterator(input_file):
if pos in label:
writer_label.write(record)
pos += 1
loop.update()
loop.close()
with tf.io.gfile.GFile(target + '-label.json', 'w') as writer:
writer.write(json.dumps(dict(distribution=train_stats.tolist(), label=sorted(label)), indent=2, sort_keys=True))
if __name__ == '__main__':
app.run(main)
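# Invocation sketch (paths and dataset names are illustrative):
#   python create_split.py --seed=3 --size=250 \
#       ML_DATA/SSL/cifar10 ML_DATA/cifar10-train.tfrecord
# writes ML_DATA/SSL/cifar10.3@250-label.tfrecord plus a -label.json holding
# the class distribution and the sorted label indices.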
| 36.973214 | 120 | 0.64453 |
4a1fd20afc0cf18c151b13e5309a361277d12563 | 64,318 | py | Python | python/paddle/fluid/incubate/fleet/utils/fleet_util.py | pyqt1/MyPaddle | 72ef733ad58b958c519b9db9a44e8716a82401d9 | [
"Apache-2.0"
] | 1 | 2020-02-21T02:26:47.000Z | 2020-02-21T02:26:47.000Z | python/paddle/fluid/incubate/fleet/utils/fleet_util.py | pyqt1/MyPaddle | 72ef733ad58b958c519b9db9a44e8716a82401d9 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/incubate/fleet/utils/fleet_util.py | pyqt1/MyPaddle | 72ef733ad58b958c519b9db9a44e8716a82401d9 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fleet Utils."""
import collections
import json
import logging
import math
import numpy as np
import os
import sys
import time
import paddle.fluid as fluid
from paddle.fluid.log_helper import get_logger
from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet as fleet_pslib
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet as fleet_transpiler
from . import hdfs
from .hdfs import *
from . import utils
__all__ = ["FleetUtil"]
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
fleet = fleet_pslib
class FleetUtil(object):
"""
FleetUtil provides some common functions for users' convenience.
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.rank0_print("my log")
"""
def __init__(self, mode="pslib"):
global fleet
if mode == "pslib":
fleet = fleet_pslib
elif mode == "transpiler":
fleet = fleet_transpiler
else:
raise ValueError(
"Please choose one mode from [\"pslib\", \"transpiler\"]")
def rank0_print(self, s):
"""
        Worker of rank 0 prints the given message.
Args:
s(str): string to print
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.rank0_print("my log")
"""
if fleet.worker_index() != 0:
return
print(s)
sys.stdout.flush()
def rank0_info(self, s):
"""
        Worker of rank 0 logs the given message at INFO level.
Args:
s(str): string to log
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.rank0_info("my log info")
"""
if fleet.worker_index() != 0:
return
_logger.info(s)
def rank0_error(self, s):
"""
        Worker of rank 0 logs the given message at ERROR level.
Args:
s(str): string to log
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.rank0_error("my log error")
"""
if fleet.worker_index() != 0:
return
_logger.error(s)
def set_zero(self,
var_name,
scope=fluid.global_scope(),
place=fluid.CPUPlace(),
param_type="int64"):
"""
Set tensor of a Variable to zero.
Args:
var_name(str): name of Variable
scope(Scope): Scope object, default is fluid.global_scope()
place(Place): Place object, default is fluid.CPUPlace()
param_type(str): param data type, default is int64
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.set_zero(myvar.name, myscope)
"""
param = scope.var(var_name).get_tensor()
param_array = np.zeros(param._get_dims()).astype(param_type)
param.set(param_array, place)
def print_global_auc(self,
scope=fluid.global_scope(),
stat_pos="_generated_var_2",
stat_neg="_generated_var_3",
print_prefix=""):
"""
Print global auc of all distributed workers.
Args:
scope(Scope): Scope object, default is fluid.global_scope()
stat_pos(str): name of auc pos bucket Variable
stat_neg(str): name of auc neg bucket Variable
print_prefix(str): prefix of print auc
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.print_global_auc(myscope, stat_pos=stat_pos.name,
stat_neg=stat_neg.name)
# below is part of model
emb = my_slot_net(slots, label) # emb can be fc layer of size 1
similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(\
emb, min=-15.0, max=15.0), name="similarity_norm")\
binary_predict = fluid.layers.concat(input=[\
fluid.layers.elementwise_sub(\
fluid.layers.ceil(similarity_norm), similarity_norm),\
similarity_norm], axis=1)
auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \
stat_neg] = fluid.layers.auc(input=binary_predict,\
label=label, curve='ROC',\
num_thresholds=4096)
"""
auc_value = self.get_global_auc(scope, stat_pos, stat_neg)
self.rank0_print(print_prefix + " global auc = %s" % auc_value)
def get_global_auc(self,
scope=fluid.global_scope(),
stat_pos="_generated_var_2",
stat_neg="_generated_var_3"):
"""
Get global auc of all distributed workers.
Args:
scope(Scope): Scope object, default is fluid.global_scope()
stat_pos(str): name of auc pos bucket Variable
stat_neg(str): name of auc neg bucket Variable
Returns:
            auc_value(float)
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
              auc_value = fleet_util.get_global_auc(myscope,
                                                    stat_pos=stat_pos,
                                                    stat_neg=stat_neg)
"""
if scope.find_var(stat_pos) is None or scope.find_var(stat_neg) is None:
self.rank0_print("not found auc bucket")
return None
fleet._role_maker._barrier_worker()
# auc pos bucket
pos = np.array(scope.find_var(stat_pos).get_tensor())
# auc pos bucket shape
old_pos_shape = np.array(pos.shape)
# reshape to one dim
pos = pos.reshape(-1)
global_pos = np.copy(pos) * 0
# mpi allreduce
fleet._role_maker._all_reduce(pos, global_pos)
# reshape to its original shape
global_pos = global_pos.reshape(old_pos_shape)
# auc neg bucket
neg = np.array(scope.find_var(stat_neg).get_tensor())
old_neg_shape = np.array(neg.shape)
neg = neg.reshape(-1)
global_neg = np.copy(neg) * 0
fleet._role_maker._all_reduce(neg, global_neg)
global_neg = global_neg.reshape(old_neg_shape)
# calculate auc
num_bucket = len(global_pos[0])
area = 0.0
pos = 0.0
neg = 0.0
new_pos = 0.0
new_neg = 0.0
total_ins_num = 0
        for i in range(num_bucket):
index = num_bucket - 1 - i
new_pos = pos + global_pos[0][index]
total_ins_num += global_pos[0][index]
new_neg = neg + global_neg[0][index]
total_ins_num += global_neg[0][index]
area += (new_neg - neg) * (pos + new_pos) / 2
pos = new_pos
neg = new_neg
auc_value = None
if pos * neg == 0 or total_ins_num == 0:
auc_value = 0.5
else:
auc_value = area / (pos * neg)
fleet._role_maker._barrier_worker()
return auc_value
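    # A minimal standalone sketch of the bucket AUC computed above
    # (pos_bucket / neg_bucket are illustrative 1-D arrays, not fleet state):
    #
    #   def bucket_auc(pos_bucket, neg_bucket):
    #       area = pos = neg = 0.0
    #       for i in range(len(pos_bucket) - 1, -1, -1):
    #           new_pos, new_neg = pos + pos_bucket[i], neg + neg_bucket[i]
    #           area += (new_neg - neg) * (pos + new_pos) / 2.0  # trapezoid
    #           pos, neg = new_pos, new_neg
    #       return 0.5 if pos * neg == 0 else area / (pos * neg)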
def load_fleet_model_one_table(self, table_id, path):
"""
load pslib model to one table
Args:
            table_id(int): id of the table to load the model into
path(str): model path
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.load_fleet_model("hdfs:/my/model/path", table_id=1)
"""
fleet.load_one_table(table_id, path)
def load_fleet_model(self, path, mode=0):
"""
load pslib model
Args:
path(str): model path
            mode(int): 0 or 1, which means load checkpoint or delta model,
default is 0
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.load_fleet_model("hdfs:/my/model/path")
fleet_util.load_fleet_model("hdfs:/my/model/path", mode=0)
"""
fleet.init_server(path, mode=mode)
def save_fleet_model(self, path, mode=0):
"""
save pslib model
Args:
path(str): model path
            mode(int): 0 or 1, which means save checkpoint or delta model,
default is 0
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.save_fleet_model("hdfs:/my/model/path")
"""
fleet.save_persistables(None, path, mode=mode)
def _get_xbox_str(self,
output_path,
day,
model_path,
xbox_base_key,
data_path,
hadoop_fs_name,
monitor_data={},
mode="patch"):
xbox_dict = collections.OrderedDict()
if mode == "base":
xbox_dict["id"] = str(xbox_base_key)
elif mode == "patch":
xbox_dict["id"] = str(int(time.time()))
else:
print("warning: unknown mode %s, set it to patch" % mode)
mode = "patch"
xbox_dict["id"] = str(int(time.time()))
xbox_dict["key"] = str(xbox_base_key)
if model_path.startswith("hdfs:") or model_path.startswith("afs:"):
model_path = model_path[model_path.find(":") + 1:]
xbox_dict["input"] = hadoop_fs_name + model_path.rstrip("/") + "/000"
xbox_dict["record_count"] = "111111"
xbox_dict["partition_type"] = "2"
xbox_dict["job_name"] = "default_job_name"
xbox_dict["ins_tag"] = "feasign"
xbox_dict["ins_path"] = data_path
job_id_with_host = os.popen("echo -n ${JOB_ID}").read().strip()
instance_id = os.popen("echo -n ${INSTANCE_ID}").read().strip()
start_pos = instance_id.find(job_id_with_host)
end_pos = instance_id.find("--")
if start_pos != -1 and end_pos != -1:
job_id_with_host = instance_id[start_pos:end_pos]
xbox_dict["job_id"] = job_id_with_host
        # currently hard coded: monitor_data is set to the empty string
xbox_dict["monitor_data"] = ""
xbox_dict["monitor_path"] = output_path.rstrip("/") + "/monitor/" \
+ day + ".txt"
xbox_dict["mpi_size"] = str(fleet.worker_num())
return json.dumps(xbox_dict)
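    # A produced line looks like (illustrative values; keys keep the
    # OrderedDict insertion order above):
    #   {"id": "1563765600", "key": "1563765600",
    #    "input": "hdfs://xxx/my/output/20190722/delta-1/000",
    #    "record_count": "111111", "partition_type": "2", ...}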
def write_model_donefile(self,
output_path,
day,
pass_id,
xbox_base_key,
hadoop_fs_name,
hadoop_fs_ugi,
hadoop_home="$HADOOP_HOME",
donefile_name="donefile.txt"):
"""
write donefile when save model
Args:
output_path(str): output path
day(str|int): training day
pass_id(str|int): training pass id
xbox_base_key(str|int): xbox base key
hadoop_fs_name(str): hdfs/afs fs name
hadoop_fs_ugi(str): hdfs/afs fs ugi
hadoop_home(str): hadoop home, default is "$HADOOP_HOME"
donefile_name(str): donefile name, default is "donefile.txt"
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.write_model_donefile(output_path="hdfs:/my/output",
model_path="hdfs:/my/model",
day=20190723,
pass_id=66,
xbox_base_key=int(time.time()),
hadoop_fs_name="hdfs://xxx",
hadoop_fs_ugi="user,passwd")
"""
day = str(day)
pass_id = str(pass_id)
xbox_base_key = int(xbox_base_key)
if pass_id != "-1":
suffix_name = "/%s/%s/" % (day, pass_id)
model_path = output_path.rstrip("/") + suffix_name
else:
suffix_name = "/%s/0/" % day
model_path = output_path.rstrip("/") + suffix_name
if fleet.worker_index() == 0:
donefile_path = output_path + "/" + donefile_name
content = "%s\t%lu\t%s\t%s\t%d" % (day, xbox_base_key,\
model_path, pass_id, 0)
configs = {
"fs.default.name": hadoop_fs_name,
"hadoop.job.ugi": hadoop_fs_ugi
}
client = HDFSClient(hadoop_home, configs)
if client.is_file(donefile_path):
pre_content = client.cat(donefile_path)
pre_content_list = pre_content.split("\n")
day_list = [i.split("\t")[0] for i in pre_content_list]
pass_list = [i.split("\t")[3] for i in pre_content_list]
exist = False
for i in range(len(day_list)):
if int(day) == int(day_list[i]) and \
int(pass_id) == int(pass_list[i]):
exist = True
break
if not exist:
with open(donefile_name, "w") as f:
f.write(pre_content + "\n")
f.write(content + "\n")
client.delete(donefile_path)
client.upload(
output_path,
donefile_name,
multi_processes=1,
overwrite=False)
self.rank0_error("write %s/%s %s succeed" % \
(day, pass_id, donefile_name))
else:
self.rank0_error("not write %s because %s/%s already "
"exists" % (donefile_name, day, pass_id))
else:
with open(donefile_name, "w") as f:
f.write(content + "\n")
client.upload(
output_path,
donefile_name,
multi_processes=1,
overwrite=False)
self.rank0_error("write %s/%s %s succeed" % \
(day, pass_id, donefile_name))
fleet._role_maker._barrier_worker()
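    # Each donefile line is tab separated:
    #   day \t xbox_base_key \t model_path \t pass_id \t 0
    # e.g. (illustrative):
    #   20190723    1563765600    hdfs:/my/output/20190723/66/    66    0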
def write_xbox_donefile(self,
output_path,
day,
pass_id,
xbox_base_key,
data_path,
hadoop_fs_name,
hadoop_fs_ugi,
monitor_data={},
hadoop_home="$HADOOP_HOME",
donefile_name=None):
"""
write delta donefile or xbox base donefile
Args:
output_path(str): output path
day(str|int): training day of model
pass_id(str|int): training pass id of model
xbox_base_key(str|int): xbox base key
data_path(str|list): training data path
hadoop_fs_name(str): hdfs/afs fs name
hadoop_fs_ugi(str): hdfs/afs fs ugi
monitor_data(dict): metrics
hadoop_home(str): hadoop home, default is "$HADOOP_HOME"
donefile_name(str): donefile name, default is None"
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.write_xbox_donefile(
output_path="hdfs:/my/output/",
model_path="hdfs:/my/output/20190722/01",
day=20190722,
pass_id=1,
xbox_base_key=int(time.time()),
data_path="hdfs:/my/data/",
hadoop_fs_name="hdfs://xxx",
hadoop_fs_ugi="user,passwd",
monitor_data={}
)
"""
day = str(day)
pass_id = str(pass_id)
xbox_base_key = int(xbox_base_key)
mode = None
if pass_id != "-1":
mode = "patch"
suffix_name = "/%s/delta-%s/" % (day, pass_id)
model_path = output_path.rstrip("/") + suffix_name
if donefile_name is None:
donefile_name = "xbox_patch_done.txt"
else:
mode = "base"
suffix_name = "/%s/base/" % day
model_path = output_path.rstrip("/") + suffix_name
if donefile_name is None:
donefile_name = "xbox_base_done.txt"
if isinstance(data_path, list):
data_path = ",".join(data_path)
if fleet.worker_index() == 0:
donefile_path = output_path + "/" + donefile_name
xbox_str = self._get_xbox_str(output_path, day, model_path, \
xbox_base_key, data_path, hadoop_fs_name, monitor_data={},
mode=mode)
configs = {
"fs.default.name": hadoop_fs_name,
"hadoop.job.ugi": hadoop_fs_ugi
}
client = HDFSClient(hadoop_home, configs)
if client.is_file(donefile_path):
pre_content = client.cat(donefile_path)
last_dict = json.loads(pre_content.split("\n")[-1])
last_day = last_dict["input"].split("/")[-3]
last_pass = last_dict["input"].split("/")[-2].split("-")[-1]
exist = False
if int(day) < int(last_day) or \
int(day) == int(last_day) and \
int(pass_id) <= int(last_pass):
exist = True
if not exist:
with open(donefile_name, "w") as f:
f.write(pre_content + "\n")
f.write(xbox_str + "\n")
client.delete(donefile_path)
client.upload(
output_path,
donefile_name,
multi_processes=1,
overwrite=False)
self.rank0_error("write %s/%s %s succeed" % \
(day, pass_id, donefile_name))
else:
self.rank0_error("not write %s because %s/%s already "
"exists" % (donefile_name, day, pass_id))
else:
with open(donefile_name, "w") as f:
f.write(xbox_str + "\n")
client.upload(
output_path,
donefile_name,
multi_processes=1,
overwrite=False)
self.rank0_error("write %s/%s %s succeed" % \
(day, pass_id, donefile_name))
fleet._role_maker._barrier_worker()
def write_cache_donefile(self,
output_path,
day,
pass_id,
key_num,
hadoop_fs_name,
hadoop_fs_ugi,
hadoop_home="$HADOOP_HOME",
donefile_name="sparse_cache.meta",
**kwargs):
"""
write cache donefile
Args:
output_path(str): output path
day(str|int): training day of model
pass_id(str|int): training pass id of model
            key_num(str|int): return value of save_cache_model (cached key count)
hadoop_fs_name(str): hdfs/afs fs name
hadoop_fs_ugi(str): hdfs/afs fs ugi
hadoop_home(str): hadoop home, default is "$HADOOP_HOME"
donefile_name(str): donefile name, default is "sparse_cache.meta"
kwargs(dict): user defined properties
file_num(int): cache file num
table_id(int): cache table id
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.write_cache_donefile(
output_path="hdfs:/my/output/",
day=20190722,
pass_id=1,
key_num=123456,
hadoop_fs_name="hdfs://xxx",
hadoop_fs_ugi="user,passwd",
)
"""
day = str(day)
pass_id = str(pass_id)
key_num = int(key_num)
file_num = kwargs.get("file_num", 16)
table_id = kwargs.get("table_id", 0)
if pass_id != "-1":
suffix_name = "/%s/delta-%s/%03d_cache" % (day, pass_id, table_id)
model_path = output_path.rstrip("/") + suffix_name
else:
suffix_name = "/%s/base/%03d_cache" % (day, table_id)
model_path = output_path.rstrip("/") + suffix_name
if fleet.worker_index() == 0:
donefile_path = model_path + "/" + donefile_name
configs = {
"fs.default.name": hadoop_fs_name,
"hadoop.job.ugi": hadoop_fs_ugi
}
client = HDFSClient(hadoop_home, configs)
if client.is_file(donefile_path):
self.rank0_error( \
"not write because %s already exists" % donefile_path)
else:
meta_str = "file_prefix:part\npart_num:%s\nkey_num:%d\n" \
% (file_num, key_num)
with open(donefile_name, "w") as f:
f.write(meta_str)
client.upload(
model_path,
donefile_name,
multi_processes=1,
overwrite=False)
self.rank0_error("write %s succeed" % donefile_path)
fleet._role_maker._barrier_worker()
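    # The uploaded sparse_cache.meta then reads, e.g. with the default
    # file_num=16 and key_num=123456:
    #   file_prefix:part
    #   part_num:16
    #   key_num:123456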
def load_model(self, output_path, day, pass_id):
"""
load pslib model
Args:
output_path(str): output path
day(str|int): training day
pass_id(str|int): training pass id
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.load_model("hdfs:/my/path", 20190722, 88)
"""
day = str(day)
pass_id = str(pass_id)
suffix_name = "/%s/%s/" % (day, pass_id)
load_path = output_path + suffix_name
self.rank0_error("going to load_model %s" % load_path)
self.load_fleet_model(load_path)
self.rank0_error("load_model done")
def save_model(self, output_path, day, pass_id):
"""
save pslib model
Args:
output_path(str): output path
day(str|int): training day
pass_id(str|int): training pass id
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.save_model("hdfs:/my/path", 20190722, 88)
"""
day = str(day)
pass_id = str(pass_id)
suffix_name = "/%s/%s/" % (day, pass_id)
model_path = output_path + suffix_name
self.rank0_print("going to save_model %s" % model_path)
self.save_fleet_model(model_path)
self.rank0_print("save_model done")
def save_batch_model(self, output_path, day):
"""
save batch model
Args:
output_path(str): output path
day(str|int): training day
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.save_batch_model("hdfs:/my/path", 20190722)
"""
day = str(day)
suffix_name = "/%s/0/" % day
model_path = output_path + suffix_name
self.rank0_print("going to save_model %s" % model_path)
fleet.save_persistables(None, model_path, mode=3)
self.rank0_print("save_batch_model done")
def save_delta_model(self, output_path, day, pass_id):
"""
save delta model
Args:
output_path(str): output path
day(str|int): training day
pass_id(str|int): training pass id
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
              fleet_util.save_delta_model("hdfs:/my/path", 20190722, 88)
"""
day = str(day)
pass_id = str(pass_id)
suffix_name = "/%s/delta-%s/" % (day, pass_id)
model_path = output_path + suffix_name
self.rank0_print("going to save_delta_model %s" % model_path)
fleet.save_persistables(None, model_path, mode=1)
self.rank0_print("save_delta_model done")
def save_xbox_base_model(self, output_path, day):
"""
save xbox base model
Args:
output_path(str): output path
day(str|int): training day
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
              fleet_util.save_xbox_base_model("hdfs:/my/path", 20190722)
"""
day = str(day)
suffix_name = "/%s/base/" % day
model_path = output_path + suffix_name
self.rank0_print("going to save_xbox_base_model " + model_path)
fleet.save_persistables(None, model_path, mode=2)
self.rank0_print("save_xbox_base_model done")
def save_cache_model(self, output_path, day, pass_id, mode=1, **kwargs):
"""
save cache model
Args:
output_path(str): output path
day(str|int): training day
pass_id(str|int): training pass id
mode(str|int): save mode
kwargs(dict): user defined properties
table_id(int): table id to save cache
Returns:
key_num(int): cache key num
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.save_cache_model("hdfs:/my/path", 20190722, 88)
"""
day = str(day)
pass_id = str(pass_id)
mode = int(mode)
table_id = kwargs.get("table_id", 0)
suffix_name = "/%s/delta-%s" % (day, pass_id)
model_path = output_path.rstrip("/") + suffix_name
self.rank0_print("going to save_cache_model %s" % model_path)
key_num = fleet.save_cache_model(
None, model_path, mode=mode, table_id=table_id)
self.rank0_print("save_cache_model done")
return key_num
def save_cache_base_model(self, output_path, day, **kwargs):
"""
save cache model
Args:
output_path(str): output path
day(str|int): training day
kwargs(dict): user defined properties
table_id(int): table id to save cache
Returns:
key_num(int): cache key num
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.save_cache_base_model("hdfs:/my/path", 20190722)
"""
day = str(day)
table_id = kwargs.get("table_id", 0)
suffix_name = "/%s/base" % day
model_path = output_path.rstrip("/") + suffix_name
self.rank0_print("going to save_cache_base_model %s" % model_path)
key_num = fleet.save_cache_model(
None, model_path, mode=2, table_id=table_id)
self.rank0_print("save_cache_base_model done")
return key_num
def pull_all_dense_params(self, scope, program):
"""
pull all dense params in trainer of rank 0
Args:
scope(Scope): fluid Scope
program(Program): fluid Program
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.pull_all_dense_params(my_scope, my_program)
"""
fleet._role_maker._barrier_worker()
if fleet._role_maker.is_first_worker():
prog_id = str(id(program))
tables = fleet._opt_info["program_id_to_worker"][prog_id].\
get_desc().dense_table
prog_conf = fleet._opt_info['program_configs'][prog_id]
prog_tables = {}
for key in prog_conf:
if "dense" not in key:
continue
for table_id in prog_conf[key]:
prog_tables[int(table_id)] = 0
for table in tables:
if int(table.table_id) not in prog_tables:
continue
var_name_list = []
for i in range(0, len(table.dense_variable_name)):
var_name = table.dense_variable_name[i]
if scope.find_var(var_name) is None:
raise ValueError("var " + var_name +
" not found in scope " +
"when pull dense")
var_name_list.append(var_name)
fleet._fleet_ptr.pull_dense(scope,
int(table.table_id), var_name_list)
fleet._role_maker._barrier_worker()
def save_paddle_inference_model(self,
executor,
scope,
program,
feeded_vars,
target_vars,
output_path,
day,
pass_id,
hadoop_fs_name,
hadoop_fs_ugi,
hadoop_home="$HADOOP_HOME",
save_combine=True):
"""
save paddle inference model, and upload to hdfs dnn_plugin path
Args:
executor(Executor): fluid Executor
scope(Scope): fluid Scope
program(Program): fluid Program
feeded_vars(list[Variable]): feed vars
target_vars(list[variable]): fetch vars
output_path(str): hdfs/afs output path
day(str|int): training day
pass_id(str|int): training pass
hadoop_fs_name(str): hadoop fs name
hadoop_fs_ugi(str): hadoop fs ugi
hadoop_home(str): hadoop home, default is "$HADOOP_HOME"
            save_combine(bool): whether to save in a single file or separate files,
default is True
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.save_paddle_inference_model(exe,
join_scope,
join_program,
feeded_vars,
target_vars,
"hdfs:/my/output/path/",
day=20190727,
pass_id=6,
hadoop_fs_name="xxx",
hadoop_fs_ugi="xxx,xxx")
"""
day = str(day)
pass_id = str(pass_id)
feeded_var_names = [i.name for i in feeded_vars]
model_name = "inference_model"
# pull dense before save
self.pull_all_dense_params(scope, program)
if fleet.worker_index() == 0:
with fluid.scope_guard(scope):
if save_combine:
fluid.io.save_inference_model(
dirname=model_name,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
executor=executor,
main_program=program.clone(),
params_filename="params")
else:
fluid.io.save_inference_model(
dirname=model_name,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
executor=executor,
main_program=program.clone())
configs = {
"fs.default.name": hadoop_fs_name,
"hadoop.job.ugi": hadoop_fs_ugi
}
client = HDFSClient(hadoop_home, configs)
if pass_id == "-1":
dest = "%s/%s/base/dnn_plugin/" % (output_path, day)
else:
dest = "%s/%s/delta-%s/dnn_plugin/" % (output_path, day,
pass_id)
if not client.is_exist(dest):
client.makedirs(dest)
client.upload(dest, model_name)
fleet._role_maker._barrier_worker()
def save_paddle_params(self,
executor,
scope,
program,
model_name,
output_path,
day,
pass_id,
hadoop_fs_name,
hadoop_fs_ugi,
hadoop_home="$HADOOP_HOME",
var_names=None,
save_combine=True):
"""
save paddle model, and upload to hdfs dnn_plugin path
Args:
executor(Executor): fluid Executor
scope(Scope): fluid Scope
program(Program): fluid Program
model_name(str): save model local dir or filename
output_path(str): hdfs/afs output path
day(str|int): training day
pass_id(str|int): training pass
hadoop_fs_name(str): hadoop fs name
hadoop_fs_ugi(str): hadoop fs ugi
hadoop_home(str): hadoop home, default is "$HADOOP_HOME"
var_names(list): save persistable var names, default is None
            save_combine(bool): whether to save in a single file or separate files,
default is True
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.save_paddle_params(exe,
join_scope,
join_program,
"paddle_dense.model.0",
"hdfs:/my/output/path/",
day=20190727,
pass_id=6,
hadoop_fs_name="xxx",
hadoop_fs_ugi="xxx,xxx",
var_names=join_all_var_names)
fleet_util.save_paddle_params(exe,
join_scope,
join_program,
"paddle_dense.model.usr.0",
"hdfs:/my/output/path/",
day=20190727,
pass_id=6,
hadoop_fs_name="xxx",
hadoop_fs_ugi="xxx,xxx",
var_names=join_user_var_names)
fleet_util.save_paddle_params(exe,
join_scope,
join_program,
"paddle_dense.model.item.0",
"hdfs:/my/output/path/",
day=20190727,
pass_id=6,
hadoop_fs_name="xxx",
hadoop_fs_ugi="xxx,xxx",
var_names=join_user_item_names)
"""
day = str(day)
pass_id = str(pass_id)
# pull dense before save
self.pull_all_dense_params(scope, program)
if fleet.worker_index() == 0:
vars = [program.global_block().var(i) for i in var_names]
with fluid.scope_guard(scope):
if save_combine:
fluid.io.save_vars(
executor, "./", program, vars=vars, filename=model_name)
else:
fluid.io.save_vars(executor, model_name, program, vars=vars)
configs = {
"fs.default.name": hadoop_fs_name,
"hadoop.job.ugi": hadoop_fs_ugi
}
client = HDFSClient(hadoop_home, configs)
if pass_id == "-1":
dest = "%s/%s/base/dnn_plugin/" % (output_path, day)
else:
dest = "%s/%s/delta-%s/dnn_plugin/" % (output_path, day,
pass_id)
if not client.is_exist(dest):
client.makedirs(dest)
if os.path.isdir(model_name):
client.upload_dir(dest, model_name)
else:
client.upload(dest, model_name)
fleet._role_maker._barrier_worker()
def get_last_save_xbox_base(self,
output_path,
hadoop_fs_name,
hadoop_fs_ugi,
hadoop_home="$HADOOP_HOME"):
"""
get last saved base xbox info from xbox_base_done.txt
Args:
output_path(str): output path
hadoop_fs_name(str): hdfs/afs fs_name
hadoop_fs_ugi(str): hdfs/afs fs_ugi
hadoop_home(str): hadoop home, default is "$HADOOP_HOME"
Returns:
[last_save_day, last_path, xbox_base_key]
last_save_day(int): day of saved model
last_path(str): model path
xbox_base_key(int): xbox key
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
last_save_day, last_path, xbox_base_key = \
                fleet_util.get_last_save_xbox_base("hdfs:/my/path",
                                                   "hdfs://xxx", "user,passwd")
"""
donefile_path = output_path + "/xbox_base_done.txt"
configs = {
"fs.default.name": hadoop_fs_name,
"hadoop.job.ugi": hadoop_fs_ugi
}
client = HDFSClient(hadoop_home, configs)
if not client.is_file(donefile_path):
return [-1, -1, int(time.time())]
pre_content = client.cat(donefile_path)
last_dict = json.loads(pre_content.split("\n")[-1])
last_day = int(last_dict["input"].split("/")[-3])
last_path = "/".join(last_dict["input"].split("/")[:-1])
xbox_base_key = int(last_dict["key"])
return [last_day, last_path, xbox_base_key]
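    # The donefile's last line is JSON, e.g. (illustrative):
    #   {"key": "1563765600",
    #    "input": "hdfs://xxx/my/output/20190722/base/000", ...}
    # from which last_day is 20190722 and last_path is
    # "hdfs://xxx/my/output/20190722/base".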
def get_last_save_xbox(self,
output_path,
hadoop_fs_name,
hadoop_fs_ugi,
hadoop_home="$HADOOP_HOME"):
"""
get last saved xbox info from xbox_patch_done.txt
Args:
output_path(str): output path
hadoop_fs_name(str): hdfs/afs fs_name
hadoop_fs_ugi(str): hdfs/afs fs_ugi
hadoop_home(str): hadoop home, default is "$HADOOP_HOME"
Returns:
[last_save_day, last_save_pass, last_path, xbox_base_key]
last_save_day(int): day of saved model
            last_save_pass(int): pass id of saved model
last_path(str): model path
xbox_base_key(int): xbox key
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
last_save_day, last_save_pass, last_path, xbox_base_key = \
fleet_util.get_last_save_xbox("hdfs:/my/path", 20190722, 88)
"""
donefile_path = output_path + "/xbox_patch_done.txt"
configs = {
"fs.default.name": hadoop_fs_name,
"hadoop.job.ugi": hadoop_fs_ugi
}
client = HDFSClient(hadoop_home, configs)
if not client.is_file(donefile_path):
return [-1, -1, "", int(time.time())]
pre_content = client.cat(donefile_path)
last_dict = json.loads(pre_content.split("\n")[-1])
last_day = int(last_dict["input"].split("/")[-3])
last_pass = int(last_dict["input"].split("/")[-2].split("-")[-1])
last_path = "/".join(last_dict["input"].split("/")[:-1])
xbox_base_key = int(last_dict["key"])
return [last_day, last_pass, last_path, xbox_base_key]
def get_last_save_model(self,
output_path,
hadoop_fs_name,
hadoop_fs_ugi,
hadoop_home="$HADOOP_HOME"):
"""
get last saved model info from donefile.txt
Args:
output_path(str): output path
hadoop_fs_name(str): hdfs/afs fs_name
hadoop_fs_ugi(str): hdfs/afs fs_ugi
hadoop_home(str): hadoop home, default is "$HADOOP_HOME"
Returns:
[last_save_day, last_save_pass, last_path, xbox_base_key]
last_save_day(int): day of saved model
            last_save_pass(int): pass id of saved model
last_path(str): model path
xbox_base_key(int): xbox key
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
last_save_day, last_save_pass, last_path, xbox_base_key = \
                fleet_util.get_last_save_model("hdfs:/my/path",
                                               "hdfs://xxx", "user,passwd")
"""
last_save_day = -1
last_save_pass = -1
last_path = ""
donefile_path = output_path + "/donefile.txt"
configs = {
"fs.default.name": hadoop_fs_name,
"hadoop.job.ugi": hadoop_fs_ugi
}
client = HDFSClient(hadoop_home, configs)
if not client.is_file(donefile_path):
return [-1, -1, "", int(time.time())]
content = client.cat(donefile_path)
content = content.split("\n")[-1].split("\t")
last_save_day = int(content[0])
last_save_pass = int(content[3])
last_path = content[2]
xbox_base_key = int(content[1])
return [last_save_day, last_save_pass, last_path, xbox_base_key]
def get_online_pass_interval(self, days, hours, split_interval,
split_per_pass, is_data_hourly_placed):
"""
get online pass interval
Args:
days(str): days to train
hours(str): hours to train
split_interval(int|str): split interval
            split_per_pass(int|str): splits per pass
is_data_hourly_placed(bool): is data hourly placed
Returns:
online_pass_interval(list)
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
online_pass_interval = fleet_util.get_online_pass_interval(
days="{20190720..20190729}",
hours="{0..23}",
split_interval=5,
split_per_pass=2,
is_data_hourly_placed=False)
"""
days = os.popen("echo -n " + days).read().split(" ")
hours = os.popen("echo -n " + hours).read().split(" ")
split_interval = int(split_interval)
split_per_pass = int(split_per_pass)
        splits_per_day = 24 * 60 // split_interval
        pass_per_day = splits_per_day // split_per_pass
left_train_hour = int(hours[0])
right_train_hour = int(hours[-1])
start = 0
split_path = []
for i in range(splits_per_day):
            h = start // 60
m = start % 60
if h < left_train_hour or h > right_train_hour:
start += split_interval
continue
if is_data_hourly_placed:
split_path.append("%02d" % h)
else:
split_path.append("%02d%02d" % (h, m))
start += split_interval
start = 0
online_pass_interval = []
for i in range(pass_per_day):
online_pass_interval.append([])
for j in range(start, start + split_per_pass):
online_pass_interval[i].append(split_path[j])
start += split_per_pass
return online_pass_interval
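    # Worked example for the docstring values above: split_interval=5 and
    # split_per_pass=2 give 288 splits/day grouped into 144 passes, so with
    # hours "{0..23}" the result starts
    #   online_pass_interval[0] == ["0000", "0005"],
    #   online_pass_interval[1] == ["0010", "0015"], and so on.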
def get_global_metrics(self,
scope=fluid.global_scope(),
stat_pos_name="_generated_var_2",
stat_neg_name="_generated_var_3",
sqrerr_name="sqrerr",
abserr_name="abserr",
prob_name="prob",
q_name="q",
pos_ins_num_name="pos",
total_ins_num_name="total"):
"""
get global metrics, including auc, bucket_error, mae, rmse,
actual_ctr, predicted_ctr, copc, mean_predict_qvalue, total_ins_num.
Args:
scope(Scope): Scope object, default is fluid.global_scope()
stat_pos_name(str): name of auc pos bucket Variable
stat_neg_name(str): name of auc neg bucket Variable
sqrerr_name(str): name of sqrerr Variable
abserr_name(str): name of abserr Variable
prob_name(str): name of prob Variable
q_name(str): name of q Variable
pos_ins_num_name(str): name of pos ins num Variable
total_ins_num_name(str): name of total ins num Variable
Returns:
[auc, bucket_error, mae, rmse, actual_ctr, predicted_ctr, copc,
mean_predict_qvalue, total_ins_num]
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
metric_list = fleet_util.get_global_metrics(myscope,
                                                        stat_pos.name,
stat_neg.name,
local_sqrerr.name,
local_abserr.name,
local_prob.name,
local_q.name,
local_pos_ins.name,
local_total_ins.name)
# below is part of example model
label = fluid.layers.data(name="click", shape=[-1, 1],\
dtype="int64", lod_level=0, append_batch_size=False)
emb = my_slot_net(slots, label) # emb can be fc layer of size 1
similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(\
emb, min=-15.0, max=15.0), name="similarity_norm")\
binary_predict = fluid.layers.concat(input=[\
fluid.layers.elementwise_sub(\
fluid.layers.ceil(similarity_norm), similarity_norm),\
similarity_norm], axis=1)
auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \
stat_neg] = fluid.layers.auc(input=binary_predict,\
label=label, curve='ROC',\
num_thresholds=4096)
local_sqrerr, local_abserr, local_prob, local_q, local_pos_ins,\
local_total_ins = fluid.contrib.layers.ctr_metric_bundle(\
similarity_norm, label)
"""
if scope.find_var(stat_pos_name) is None or \
scope.find_var(stat_neg_name) is None:
self.rank0_print("not found auc bucket")
return [None] * 9
elif scope.find_var(sqrerr_name) is None:
self.rank0_print("not found sqrerr_name=%s" % sqrerr_name)
return [None] * 9
elif scope.find_var(abserr_name) is None:
self.rank0_print("not found abserr_name=%s" % abserr_name)
return [None] * 9
elif scope.find_var(prob_name) is None:
self.rank0_print("not found prob_name=%s" % prob_name)
return [None] * 9
elif scope.find_var(q_name) is None:
self.rank0_print("not found q_name=%s" % q_name)
return [None] * 9
elif scope.find_var(pos_ins_num_name) is None:
self.rank0_print("not found pos_ins_num_name=%s" % pos_ins_num_name)
return [None] * 9
elif scope.find_var(total_ins_num_name) is None:
self.rank0_print("not found total_ins_num_name=%s" % \
total_ins_num_name)
return [None] * 9
# barrier worker to ensure all workers finished training
fleet._role_maker._barrier_worker()
# get auc
auc = self.get_global_auc(scope, stat_pos_name, stat_neg_name)
pos = np.array(scope.find_var(stat_pos_name).get_tensor())
# auc pos bucket shape
old_pos_shape = np.array(pos.shape)
# reshape to one dim
pos = pos.reshape(-1)
global_pos = np.copy(pos) * 0
# mpi allreduce
fleet._role_maker._all_reduce(pos, global_pos)
# reshape to its original shape
global_pos = global_pos.reshape(old_pos_shape)
# auc neg bucket
neg = np.array(scope.find_var(stat_neg_name).get_tensor())
old_neg_shape = np.array(neg.shape)
neg = neg.reshape(-1)
global_neg = np.copy(neg) * 0
fleet._role_maker._all_reduce(neg, global_neg)
global_neg = global_neg.reshape(old_neg_shape)
num_bucket = len(global_pos[0])
def get_metric(name):
metric = np.array(scope.find_var(name).get_tensor())
old_metric_shape = np.array(metric.shape)
metric = metric.reshape(-1)
global_metric = np.copy(metric) * 0
fleet._role_maker._all_reduce(metric, global_metric)
global_metric = global_metric.reshape(old_metric_shape)
return global_metric[0]
global_sqrerr = get_metric(sqrerr_name)
global_abserr = get_metric(abserr_name)
global_prob = get_metric(prob_name)
global_q_value = get_metric(q_name)
# note: get ins_num from auc bucket is not actual value,
# so get it from metric op
pos_ins_num = get_metric(pos_ins_num_name)
total_ins_num = get_metric(total_ins_num_name)
neg_ins_num = total_ins_num - pos_ins_num
mae = global_abserr / total_ins_num
rmse = math.sqrt(global_sqrerr / total_ins_num)
return_actual_ctr = pos_ins_num / total_ins_num
predicted_ctr = global_prob / total_ins_num
mean_predict_qvalue = global_q_value / total_ins_num
copc = 0.0
        if abs(predicted_ctr) > 1e-6:
copc = return_actual_ctr / predicted_ctr
# calculate bucket error
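        # (algorithm) predictions fall into num_bucket equal-width ctr
        # buckets; consecutive buckets are accumulated (restarting whenever
        # the ctr span exceeds k_max_span) until the estimated sampling error
        # of the averaged predicted ctr drops below k_relative_error_bound;
        # each accepted group contributes the impression-weighted
        # |actual_ctr / predicted_ctr - 1| to the final weighted average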
last_ctr = -1.0
impression_sum = 0.0
ctr_sum = 0.0
click_sum = 0.0
error_sum = 0.0
error_count = 0.0
click = 0.0
show = 0.0
ctr = 0.0
adjust_ctr = 0.0
relative_error = 0.0
actual_ctr = 0.0
relative_ctr_error = 0.0
k_max_span = 0.01
k_relative_error_bound = 0.05
        for i in range(num_bucket):
click = global_pos[0][i]
show = global_pos[0][i] + global_neg[0][i]
ctr = float(i) / num_bucket
if abs(ctr - last_ctr) > k_max_span:
last_ctr = ctr
impression_sum = 0.0
ctr_sum = 0.0
click_sum = 0.0
impression_sum += show
ctr_sum += ctr * show
click_sum += click
if impression_sum == 0:
continue
adjust_ctr = ctr_sum / impression_sum
if adjust_ctr == 0:
continue
relative_error = \
math.sqrt((1 - adjust_ctr) / (adjust_ctr * impression_sum))
if relative_error < k_relative_error_bound:
actual_ctr = click_sum / impression_sum
relative_ctr_error = abs(actual_ctr / adjust_ctr - 1)
error_sum += relative_ctr_error * impression_sum
error_count += impression_sum
last_ctr = -1
bucket_error = error_sum / error_count if error_count > 0 else 0.0
return [
auc, bucket_error, mae, rmse, return_actual_ctr, predicted_ctr,
copc, mean_predict_qvalue, int(total_ins_num)
]
def print_global_metrics(self,
scope=fluid.global_scope(),
stat_pos_name="_generated_var_2",
stat_neg_name="_generated_var_3",
sqrerr_name="sqrerr",
abserr_name="abserr",
prob_name="prob",
q_name="q",
pos_ins_num_name="pos",
total_ins_num_name="total",
print_prefix=""):
"""
print global metrics, including auc, bucket_error, mae, rmse,
actual_ctr, predicted_ctr, copc, mean_predict_qvalue, total_ins_num.
Args:
scope(Scope): Scope object, default is fluid.global_scope()
stat_pos_name(str): name of auc pos bucket Variable
stat_neg_name(str): name of auc neg bucket Variable
sqrerr_name(str): name of sqrerr Variable
abserr_name(str): name of abserr Variable
prob_name(str): name of prob Variable
q_name(str): name of q Variable
pos_ins_num_name(str): name of pos ins num Variable
total_ins_num_name(str): name of total ins num Variable
print_prefix(str): print prefix
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
fleet_util.print_global_metrics(myscope,
                                              stat_pos.name,
stat_neg.name,
local_sqrerr.name,
local_abserr.name,
local_prob.name,
local_q.name,
local_pos_ins.name,
local_total_ins.name)
              # below is part of the model
label = fluid.layers.data(name="click", shape=[-1, 1],\
dtype="int64", lod_level=0, append_batch_size=False)
emb = my_slot_net(slots, label) # emb can be fc layer of size 1
similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(\
                  emb, min=-15.0, max=15.0), name="similarity_norm")
binary_predict = fluid.layers.concat(input=[\
fluid.layers.elementwise_sub(\
fluid.layers.ceil(similarity_norm), similarity_norm),\
similarity_norm], axis=1)
auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \
stat_neg] = fluid.layers.auc(input=binary_predict,\
label=label, curve='ROC',\
num_thresholds=4096)
local_sqrerr, local_abserr, local_prob, local_q, local_pos_ins, \
local_total_ins = fluid.contrib.layers.ctr_metric_bundle(\
similarity_norm, label)
"""
if scope.find_var(stat_pos_name) is None or \
scope.find_var(stat_neg_name) is None:
self.rank0_print("not found auc bucket")
return
elif scope.find_var(sqrerr_name) is None:
self.rank0_print("not found sqrerr_name=%s" % sqrerr_name)
return
elif scope.find_var(abserr_name) is None:
self.rank0_print("not found abserr_name=%s" % abserr_name)
return
elif scope.find_var(prob_name) is None:
self.rank0_print("not found prob_name=%s" % prob_name)
return
elif scope.find_var(q_name) is None:
self.rank0_print("not found q_name=%s" % q_name)
return
elif scope.find_var(pos_ins_num_name) is None:
self.rank0_print("not found pos_ins_num_name=%s" % pos_ins_num_name)
return
elif scope.find_var(total_ins_num_name) is None:
self.rank0_print("not found total_ins_num_name=%s" % \
total_ins_num_name)
return
auc, bucket_error, mae, rmse, actual_ctr, predicted_ctr, copc,\
mean_predict_qvalue, total_ins_num = self.get_global_metrics(\
scope, stat_pos_name, stat_neg_name, sqrerr_name, abserr_name,\
prob_name, q_name, pos_ins_num_name, total_ins_num_name)
        self.rank0_print("%s global AUC=%.6f BUCKET_ERROR=%.6f MAE=%.6f "
                         "RMSE=%.6f Actual_CTR=%.6f Predicted_CTR=%.6f "
                         "COPC=%.6f MEAN Q_VALUE=%.6f Ins number=%s" %
                         (print_prefix, auc, bucket_error, mae, rmse,
                          actual_ctr, predicted_ctr, copc, mean_predict_qvalue,
                          total_ins_num))
def program_type_trans(self, prog_dir, prog_fn, is_text):
return utils.program_type_trans(prog_dir, prog_fn, is_text)
def draw_from_program_file(self, model_filename, is_text, output_dir,
output_filename):
"""draw program from file"""
program = utils.load_program(model_filename, is_text)
utils.graphviz(program.global_block(), output_dir, output_filename)
def draw_from_program(self, program, output_dir, output_name):
"""draw Program"""
utils.graphviz(program.global_block(), output_dir, output_name)
def check_two_programs(self, config):
train_prog = utils.load_program(config.train_prog_path,
config.is_text_train_program)
pruned_prog = utils.load_program(config.pruned_prog_path,
config.is_text_pruned_program)
if config.draw:
pruned_dir = os.path.dirname(config.pruned_prog_path)
self.draw_from_program(pruned_prog, pruned_dir,
config.draw_out_name)
res = utils.check_pruned_program_vars(train_prog, pruned_prog)
if res:
_logger.info("check_programs succeed.")
else:
            _logger.info(
                "check_programs failed. pruned program and train program do not match!"
            )
return res
def check_vars_and_dump(self, config):
_logger.info("start check_vars_and_dump.")
results = utils.check_saved_vars_try_dump(
config.dump_model_dir, config.dump_program_filename,
config.is_text_dump_program, config.feed_config,
config.fetch_config, config.batch_size, config.save_params_filename)
_logger.info("check_vars_and_dump succeed.")
return results
def parse_program_proto(self, prog_path, is_text, output_dir):
"""
Parse program.proto into a more readable format.
This function will generate three files:
output_dir/vars_all.log,
output_dir/vars_persistable.log,
output_dir/ops.log.
Args:
prog_path(str): proto file path to be parsed.
            is_text(bool): whether the proto file is in human-readable format (otherwise binary).
output_dir(str): output dir.
Examples:
.. code-block:: python
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fleet_util = FleetUtil()
program_path = "./program.pbtxt"
is_text = True
output_dir = "/tmp/"
fleet_util.parse_program_proto(program_path, is_text, output_dir)
"""
program = utils.load_program(prog_path, is_text)
utils.parse_program(program, output_dir)
| 39.751545 | 104 | 0.520181 |
4a1fd20d8cbbf1ad85e8e5520c666ed5ab06702e | 6,774 | py | Python | deprecated/examples/dist_test/train.py | hutuxian/FleetX | 843c7aa33f5a14680becf058a3aaf0327eefafd4 | [
"Apache-2.0"
] | 170 | 2020-08-12T12:07:01.000Z | 2022-03-07T02:38:26.000Z | deprecated/examples/dist_test/train.py | hutuxian/FleetX | 843c7aa33f5a14680becf058a3aaf0327eefafd4 | [
"Apache-2.0"
] | 195 | 2020-08-13T03:22:15.000Z | 2022-03-30T07:40:25.000Z | deprecated/examples/dist_test/train.py | hutuxian/FleetX | 843c7aa33f5a14680becf058a3aaf0327eefafd4 | [
"Apache-2.0"
] | 67 | 2020-08-14T02:07:46.000Z | 2022-03-28T10:05:33.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import numpy as np
import argparse
import paddle
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
from paddle.fluid.incubate.fleet.base import role_maker
import model
import utils
parser = argparse.ArgumentParser(description='Argument settings')
parser.add_argument('--distributed', action='store_true', default=False,
help='distributed training flag for showing the differences in code that'
' distinguish distributed training from single-card training. NOTICE:'
' Do not specify this flag in single-card training mode')
parser.add_argument('--batch_size', type=int, default=16, help='batch size')
parser.add_argument('--epoch_num', type=int, default=2,
help='epoch number for training')
parser.add_argument('--learning_rate', type=float, default=2e-3,
help='learning rate')
args = parser.parse_args()
def main():
'''
Main function for training to illustrate steps for common distributed
training configuration
'''
if args.distributed:
# initialize distributed environment
init_dist_env()
# create an executor and its place
place = create_place(args.distributed)
exe = create_executor(place)
# create training network, including a startup program and a main program
train_prog, start_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(train_prog, start_prog):
feed, fetch = model.build_train_net()
    # create an optimizer and run minimize(), which is where the distributed strategy takes effect
optimizer = create_optimizer(args.distributed)
optimizer.minimize(fetch[0], start_prog)
# create train-data loader, depending on "distributed" flag to slice dataset
loader = create_train_dataloader(feed, place, args.distributed)
if args.distributed:
        # If the "distributed" flag is set, the following assignment has to be
        # done before running the train program, so that the target program has
        # the distributed strategy applied. We plan to optimize this away in
        # the future.
train_prog = fleet.main_program
# do train
train(train_prog, start_prog, exe, feed, fetch, loader)
# create testing network
test_prog = fluid.Program()
with fluid.program_guard(test_prog):
feed, fetch = model.build_test_net()
# create test-data loader, depending on "distributed" flag to slice dataset
loader = create_test_dataloader(feed, place, args.distributed)
# run single-card evaluation to calculate metric for local dataset
local_value, local_weight = test(test_prog, exe, feed, fetch, loader)
if args.distributed:
# if "distributed" flag is set, gather all evaluation metrics from other workers
dist_acc = utils.dist_eval_acc(exe, local_value, local_weight)
print('[TEST] global_acc1: %.2f' % dist_acc)
def init_dist_env():
'''
Initialize distributed environment with Paddle Fleet
'''
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
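    # NOTE (sketch): in collective mode this script is normally started by a
    # distributed launcher (e.g. "python -m paddle.distributed.launch"), which
    # sets FLAGS_selected_gpus and the other env vars PaddleCloudRoleMaker reads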
def create_place(is_distributed):
'''
Specify device index according to the distributed environment variable
'''
place_idx = int(os.environ['FLAGS_selected_gpus']) if is_distributed else 0
return fluid.CUDAPlace(place_idx)
def distributed_optimize(optimizer):
'''
A part of configuration for distributed training
'''
strategy = DistributedStrategy()
strategy.fuse_all_reduce_ops = True
strategy.nccl_comm_num = 2
strategy.fuse_elewise_add_act_ops=True
strategy.fuse_bn_act_ops = True
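    # (sketch of intent) the fuse_* switches batch small ops/all-reduces into
    # fewer kernel launches, and nccl_comm_num > 1 lets communication overlap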
return fleet.distributed_optimizer(optimizer, strategy=strategy)
def create_executor(place):
'''
Create executor
'''
exe = fluid.Executor(place)
return exe
def create_optimizer(is_distributed):
'''
    Create an optimizer, and decide whether to apply the distributed strategy according
to the "distributed" flag
'''
optimizer = fluid.optimizer.SGD(learning_rate=args.learning_rate)
if is_distributed:
optimizer = distributed_optimize(optimizer)
return optimizer
def create_train_dataloader(feed, place, is_distributed):
'''
Use local training dataset if it is found, otherwise, download dataset
'''
train_data_path = 'dataset/train-images-idx3-ubyte.gz'
train_label_path = 'dataset/train-labels-idx1-ubyte.gz'
if os.path.exists(train_data_path) and os.path.exists(train_label_path):
reader = paddle.dataset.mnist.reader_creator(train_data_path,
train_label_path, 100)
else:
reader = paddle.dataset.mnist.train()
return utils.create_dataloader(reader, feed, place,
batch_size=args.batch_size, is_test=False, is_distributed=is_distributed)
def create_test_dataloader(feed, place, is_distributed):
'''
Use local testing dataset if it is found, otherwise, download dataset
'''
test_data_path = 'dataset/t10k-images-idx3-ubyte.gz'
test_label_path = 'dataset/t10k-labels-idx1-ubyte.gz'
if os.path.exists(test_data_path) and os.path.exists(test_label_path):
reader = paddle.dataset.mnist.reader_creator(test_data_path,
test_label_path, 100)
else:
reader = paddle.dataset.mnist.test()
return utils.create_dataloader(reader, feed, place,
batch_size=args.batch_size, is_test=True, is_distributed=is_distributed)
def train(train_prog, start_prog, exe, feed, fetch, loader):
'''
    The common training code. Run the startup program to initialize parameters
    first, then train batch by batch until reaching the target epoch number.
'''
exe.run(start_prog)
for epoch in range(args.epoch_num):
for idx, sample in enumerate(loader()):
ret = exe.run(train_prog, feed=sample, fetch_list=fetch)
if idx % 100 == 0:
print('[TRAIN] epoch=%d step=%d loss=%f' % (epoch, idx, ret[0][0]))
def test(test_prog, exe, feed, fetch, loader):
'''
Taking Accuracy evaluation as an example, numbers of correct and total predictions
(acc_manager.value and acc_manager.weight) are accumulated locally and then they are
gathered to calculate the global accuracy metric
'''
acc_manager = fluid.metrics.Accuracy()
for idx, sample in enumerate(loader()):
ret = exe.run(test_prog, feed=sample, fetch_list=fetch)
acc_manager.update(value=ret[0], weight=utils.sample_batch(sample))
if idx % 100 == 0:
print('[TEST] step=%d accum_acc1=%.2f' % (idx, acc_manager.eval()))
print('[TEST] local_acc1: %.2f' % acc_manager.eval())
return acc_manager.value, acc_manager.weight
if __name__ == '__main__':
main()
| 36.616216 | 95 | 0.711544 |
4a1fd244add56d18910e3cc58aa40a55350395d3 | 2,427 | py | Python | ext/ws/rs/core/__init__.py | mariano-git/plugin.video.flow | a63362f308c1e67ba532acf9ba4eced95a243f49 | [
"MIT"
] | 3 | 2022-01-19T00:27:24.000Z | 2022-03-24T15:15:24.000Z | ext/ws/rs/core/__init__.py | mariano-git/plugin.video.flow | a63362f308c1e67ba532acf9ba4eced95a243f49 | [
"MIT"
] | 3 | 2021-12-30T06:05:30.000Z | 2022-01-19T02:52:21.000Z | ext/ws/rs/core/__init__.py | mariano-git/plugin.video.flow | a63362f308c1e67ba532acf9ba4eced95a243f49 | [
"MIT"
] | null | null | null | from enum import Enum
class MediaType:
APPLICATION_JSON = 'application/json'
TEXT_PLAIN = 'text/plain'
class Status(Enum):
def __init__(self, code, reason):
self.code = code
self.reason = reason
OK = (200, "OK")
CREATED = (201, "Created")
ACCEPTED = (202, "Accepted")
NO_CONTENT = (204, "No Content")
RESET_CONTENT = (205, "Reset Content")
PARTIAL_CONTENT = (206, "Partial Content")
MOVED_PERMANENTLY = (301, "Moved Permanently")
FOUND = (302, "Found")
SEE_OTHER = (303, "See Other")
NOT_MODIFIED = (304, "Not Modified")
USE_PROXY = (305, "Use Proxy")
TEMPORARY_REDIRECT = (307, "Temporary Redirect")
BAD_REQUEST = (400, "Bad Request")
UNAUTHORIZED = (401, "Unauthorized")
PAYMENT_REQUIRED = (402, "Payment Required")
FORBIDDEN = (403, "Forbidden")
NOT_FOUND = (404, "Not Found")
METHOD_NOT_ALLOWED = (405, "Method Not Allowed")
NOT_ACCEPTABLE = (406, "Not Acceptable")
PROXY_AUTHENTICATION_REQUIRED = (407, "Proxy Authentication Required")
REQUEST_TIMEOUT = (408, "Request Timeout")
CONFLICT = (409, "Conflict")
GONE = (410, "Gone")
LENGTH_REQUIRED = (411, "Length Required")
PRECONDITION_FAILED = (412, "Precondition Failed")
REQUEST_ENTITY_TOO_LARGE = (413, "Request Entity Too Large")
REQUEST_URI_TOO_LONG = (414, "Request-URI Too Long")
UNSUPPORTED_MEDIA_TYPE = (415, "Unsupported Media Type")
REQUESTED_RANGE_NOT_SATISFIABLE = (416, "Requested Range Not Satisfiable")
EXPECTATION_FAILED = (417, "Expectation Failed")
PRECONDITION_REQUIRED = (428, "Precondition Required")
TOO_MANY_REQUESTS = (429, "Too Many Requests")
REQUEST_HEADER_FIELDS_TOO_LARGE = (431, "Request Header Fields Too Large")
INTERNAL_SERVER_ERROR = (500, "Internal Server Error")
NOT_IMPLEMENTED = (501, "Not Implemented")
BAD_GATEWAY = (502, "Bad Gateway")
SERVICE_UNAVAILABLE = (503, "Service Unavailable")
GATEWAY_TIMEOUT = (504, "Gateway Timeout")
HTTP_VERSION_NOT_SUPPORTED = (505, "HTTP Version Not Supported")
NETWORK_AUTHENTICATION_REQUIRED = (511, "Network Authentication Required")
@classmethod
def fromCode(cls, code: int):
for stat in Status:
if stat.code == code:
return stat
class Response:
def __init__(self, response):
self.response = response
| 23.114286 | 78 | 0.65925 |
4a1fd3302e80b14efcffbfe59178228748d3e466 | 2,666 | py | Python | neutron/plugins/ml2/extensions/qos.py | rolaya/neutron | 49f6773998ce8e8c68197a853d7f12e5e9dc6df5 | [
"Apache-2.0"
] | null | null | null | neutron/plugins/ml2/extensions/qos.py | rolaya/neutron | 49f6773998ce8e8c68197a853d7f12e5e9dc6df5 | [
"Apache-2.0"
] | null | null | null | neutron/plugins/ml2/extensions/qos.py | rolaya/neutron | 49f6773998ce8e8c68197a853d7f12e5e9dc6df5 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins.ml2 import api
from oslo_log import log as logging
from neutron.core_extensions import base as base_core
from neutron.core_extensions import qos as qos_core
from neutron.common import log_utils
LOG = logging.getLogger(__name__)
QOS_EXT_DRIVER_ALIAS = 'qos'
class QosExtensionDriver(api.ExtensionDriver):
LOG.info('%s(): caller(): %s', log_utils.get_fname(1), log_utils.get_fname(2))
def initialize(self):
LOG.info('%s(): caller(): %s', log_utils.get_fname(1), log_utils.get_fname(2))
self.core_ext_handler = qos_core.QosCoreResourceExtension()
LOG.debug("QosExtensionDriver initialization complete")
def process_create_network(self, context, data, result):
LOG.info('%s(): caller(): %s', log_utils.get_fname(1), log_utils.get_fname(2))
self.core_ext_handler.process_fields(
context, base_core.NETWORK, base_core.EVENT_CREATE, data, result)
def process_update_network(self, context, data, result):
LOG.info('%s(): caller(): %s', log_utils.get_fname(1), log_utils.get_fname(2))
self.core_ext_handler.process_fields(
context, base_core.NETWORK, base_core.EVENT_UPDATE, data, result)
def process_create_port(self, context, data, result):
LOG.info('%s(): caller(): %s', log_utils.get_fname(1), log_utils.get_fname(2))
self.core_ext_handler.process_fields(
context, base_core.PORT, base_core.EVENT_UPDATE, data, result)
process_update_port = process_create_port
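    # port create and update share the same QoS field processing, hence the alias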
def extend_network_dict(self, session, db_data, result):
LOG.info('%s(): caller(): %s', log_utils.get_fname(1), log_utils.get_fname(2))
result.update(
self.core_ext_handler.extract_fields(
base_core.NETWORK, db_data))
def extend_port_dict(self, session, db_data, result):
LOG.info('%s(): caller(): %s', log_utils.get_fname(1), log_utils.get_fname(2))
result.update(
self.core_ext_handler.extract_fields(base_core.PORT, db_data))
| 42.31746 | 86 | 0.705551 |
4a1fd371969f4c33eb7efa3a6ea0a91abab62491 | 837 | py | Python | tests/test_md5.py | petitnau/crypy | 05bacef2cc169359c2a957bb4964f0f0416e2ead | [
"MIT"
] | null | null | null | tests/test_md5.py | petitnau/crypy | 05bacef2cc169359c2a957bb4964f0f0416e2ead | [
"MIT"
] | null | null | null | tests/test_md5.py | petitnau/crypy | 05bacef2cc169359c2a957bb4964f0f0416e2ead | [
"MIT"
] | null | null | null | from crypy.hash.md5 import MD5
def test_md5_extend():
secret = b"123SECRET"
msg = b"messaggio normale"
h = MD5(secret+msg)
h.test()
dig = h.digest()
base, h_clone = MD5.clone(len(secret), msg, dig)
extension = b"extended"
h_clone.update(extension)
tot_msg = base+extension
assert h_clone.digest() == MD5(secret+tot_msg).digest()
def test_md5():
hashes = [
("ciao", "6e6bc4e49dd477ebc98ef4046c067b5f"),
("long long long long long long long really long test", "024b394e7c7693798abb230ae256787e")
]
md5 = MD5()
md5.update(hashes[0][0])
assert md5.hexdigest() == hashes[0][1]
assert MD5().update(hashes[0][0]).hexdigest() == hashes[0][1]
assert MD5(hashes[0][0]).hexdigest() == hashes[0][1]
assert MD5(hashes[1][0]).hexdigest() == hashes[1][1]
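def test_md5_empty():
    # Extra sketch test; assumes MD5() with no input hashes the empty string.
    # The MD5 digest of b"" is the well-known constant below.
    assert MD5().hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"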
| 25.363636 | 99 | 0.628435 |
4a1fd37ec8d6a8a7f1afd1d8410741b53083b5d0 | 655,663 | py | Python | Python36_x86_Template/Lib/pydoc_data/topics.py | iveskim/cloudbase-init-installer | bc7630a7fb7dd527618dac3938147e2d9439c285 | [
"Apache-2.0"
] | null | null | null | Python36_x86_Template/Lib/pydoc_data/topics.py | iveskim/cloudbase-init-installer | bc7630a7fb7dd527618dac3938147e2d9439c285 | [
"Apache-2.0"
] | null | null | null | Python36_x86_Template/Lib/pydoc_data/topics.py | iveskim/cloudbase-init-installer | bc7630a7fb7dd527618dac3938147e2d9439c285 | [
"Apache-2.0"
] | 4 | 2019-12-11T18:50:22.000Z | 2020-08-10T19:25:11.000Z | # -*- coding: utf-8 -*-
# Autogenerated by Sphinx on Tue Jun 12 00:16:23 2018
topics = {'assert': 'The "assert" statement\n'
'**********************\n'
'\n'
'Assert statements are a convenient way to insert debugging '
'assertions\n'
'into a program:\n'
'\n'
' assert_stmt ::= "assert" expression ["," expression]\n'
'\n'
'The simple form, "assert expression", is equivalent to\n'
'\n'
' if __debug__:\n'
' if not expression: raise AssertionError\n'
'\n'
'The extended form, "assert expression1, expression2", is '
'equivalent to\n'
'\n'
' if __debug__:\n'
' if not expression1: raise AssertionError(expression2)\n'
'\n'
'These equivalences assume that "__debug__" and "AssertionError" '
'refer\n'
'to the built-in variables with those names. In the current\n'
'implementation, the built-in variable "__debug__" is "True" under\n'
'normal circumstances, "False" when optimization is requested '
'(command\n'
'line option -O). The current code generator emits no code for an\n'
'assert statement when optimization is requested at compile time. '
'Note\n'
'that it is unnecessary to include the source code for the '
'expression\n'
'that failed in the error message; it will be displayed as part of '
'the\n'
'stack trace.\n'
'\n'
'Assignments to "__debug__" are illegal. The value for the '
'built-in\n'
'variable is determined when the interpreter starts.\n',
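 # Sketch: "assert x > 0, 'x must be positive'" raises
 # AssertionError('x must be positive') only while __debug__ is true,
 # i.e. not when running under "python -O".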
'assignment': 'Assignment statements\n'
'*********************\n'
'\n'
'Assignment statements are used to (re)bind names to values and '
'to\n'
'modify attributes or items of mutable objects:\n'
'\n'
' assignment_stmt ::= (target_list "=")+ (starred_expression '
'| yield_expression)\n'
' target_list ::= target ("," target)* [","]\n'
' target ::= identifier\n'
' | "(" [target_list] ")"\n'
' | "[" [target_list] "]"\n'
' | attributeref\n'
' | subscription\n'
' | slicing\n'
' | "*" target\n'
'\n'
'(See section Primaries for the syntax definitions for '
'*attributeref*,\n'
'*subscription*, and *slicing*.)\n'
'\n'
'An assignment statement evaluates the expression list '
'(remember that\n'
'this can be a single expression or a comma-separated list, the '
'latter\n'
'yielding a tuple) and assigns the single resulting object to '
'each of\n'
'the target lists, from left to right.\n'
'\n'
'Assignment is defined recursively depending on the form of the '
'target\n'
'(list). When a target is part of a mutable object (an '
'attribute\n'
'reference, subscription or slicing), the mutable object must\n'
'ultimately perform the assignment and decide about its '
'validity, and\n'
'may raise an exception if the assignment is unacceptable. The '
'rules\n'
'observed by various types and the exceptions raised are given '
'with the\n'
'definition of the object types (see section The standard type\n'
'hierarchy).\n'
'\n'
'Assignment of an object to a target list, optionally enclosed '
'in\n'
'parentheses or square brackets, is recursively defined as '
'follows.\n'
'\n'
'* If the target list is empty: The object must also be an '
'empty\n'
' iterable.\n'
'\n'
'* If the target list is a single target in parentheses: The '
'object\n'
' is assigned to that target.\n'
'\n'
'* If the target list is a comma-separated list of targets, or '
'a\n'
' single target in square brackets: The object must be an '
'iterable\n'
' with the same number of items as there are targets in the '
'target\n'
' list, and the items are assigned, from left to right, to '
'the\n'
' corresponding targets.\n'
'\n'
' * If the target list contains one target prefixed with an\n'
' asterisk, called a “starred” target: The object must be '
'an\n'
' iterable with at least as many items as there are targets '
'in the\n'
' target list, minus one. The first items of the iterable '
'are\n'
' assigned, from left to right, to the targets before the '
'starred\n'
' target. The final items of the iterable are assigned to '
'the\n'
' targets after the starred target. A list of the remaining '
'items\n'
' in the iterable is then assigned to the starred target '
'(the list\n'
' can be empty).\n'
'\n'
' * Else: The object must be an iterable with the same number '
'of\n'
' items as there are targets in the target list, and the '
'items are\n'
' assigned, from left to right, to the corresponding '
'targets.\n'
'\n'
'Assignment of an object to a single target is recursively '
'defined as\n'
'follows.\n'
'\n'
'* If the target is an identifier (name):\n'
'\n'
' * If the name does not occur in a "global" or "nonlocal" '
'statement\n'
' in the current code block: the name is bound to the object '
'in the\n'
' current local namespace.\n'
'\n'
' * Otherwise: the name is bound to the object in the global\n'
' namespace or the outer namespace determined by '
'"nonlocal",\n'
' respectively.\n'
'\n'
' The name is rebound if it was already bound. This may cause '
'the\n'
' reference count for the object previously bound to the name '
'to reach\n'
' zero, causing the object to be deallocated and its '
'destructor (if it\n'
' has one) to be called.\n'
'\n'
'* If the target is an attribute reference: The primary '
'expression in\n'
' the reference is evaluated. It should yield an object with\n'
' assignable attributes; if this is not the case, "TypeError" '
'is\n'
' raised. That object is then asked to assign the assigned '
'object to\n'
' the given attribute; if it cannot perform the assignment, it '
'raises\n'
' an exception (usually but not necessarily '
'"AttributeError").\n'
'\n'
' Note: If the object is a class instance and the attribute '
'reference\n'
' occurs on both sides of the assignment operator, the RHS '
'expression,\n'
' "a.x" can access either an instance attribute or (if no '
'instance\n'
' attribute exists) a class attribute. The LHS target "a.x" '
'is always\n'
' set as an instance attribute, creating it if necessary. '
'Thus, the\n'
' two occurrences of "a.x" do not necessarily refer to the '
'same\n'
' attribute: if the RHS expression refers to a class '
'attribute, the\n'
' LHS creates a new instance attribute as the target of the\n'
' assignment:\n'
'\n'
' class Cls:\n'
' x = 3 # class variable\n'
' inst = Cls()\n'
' inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x '
'as 3\n'
'\n'
' This description does not necessarily apply to descriptor\n'
' attributes, such as properties created with "property()".\n'
'\n'
'* If the target is a subscription: The primary expression in '
'the\n'
' reference is evaluated. It should yield either a mutable '
'sequence\n'
' object (such as a list) or a mapping object (such as a '
'dictionary).\n'
' Next, the subscript expression is evaluated.\n'
'\n'
' If the primary is a mutable sequence object (such as a '
'list), the\n'
' subscript must yield an integer. If it is negative, the '
'sequence’s\n'
' length is added to it. The resulting value must be a '
'nonnegative\n'
' integer less than the sequence’s length, and the sequence is '
'asked\n'
' to assign the assigned object to its item with that index. '
'If the\n'
' index is out of range, "IndexError" is raised (assignment to '
'a\n'
' subscripted sequence cannot add new items to a list).\n'
'\n'
' If the primary is a mapping object (such as a dictionary), '
'the\n'
' subscript must have a type compatible with the mapping’s key '
'type,\n'
' and the mapping is then asked to create a key/datum pair '
'which maps\n'
' the subscript to the assigned object. This can either '
'replace an\n'
' existing key/value pair with the same key value, or insert a '
'new\n'
' key/value pair (if no key with the same value existed).\n'
'\n'
' For user-defined objects, the "__setitem__()" method is '
'called with\n'
' appropriate arguments.\n'
'\n'
'* If the target is a slicing: The primary expression in the\n'
' reference is evaluated. It should yield a mutable sequence '
'object\n'
' (such as a list). The assigned object should be a sequence '
'object\n'
' of the same type. Next, the lower and upper bound '
'expressions are\n'
' evaluated, insofar they are present; defaults are zero and '
'the\n'
' sequence’s length. The bounds should evaluate to integers. '
'If\n'
' either bound is negative, the sequence’s length is added to '
'it. The\n'
' resulting bounds are clipped to lie between zero and the '
'sequence’s\n'
' length, inclusive. Finally, the sequence object is asked to '
'replace\n'
' the slice with the items of the assigned sequence. The '
'length of\n'
' the slice may be different from the length of the assigned '
'sequence,\n'
' thus changing the length of the target sequence, if the '
'target\n'
' sequence allows it.\n'
'\n'
'**CPython implementation detail:** In the current '
'implementation, the\n'
'syntax for targets is taken to be the same as for expressions, '
'and\n'
'invalid syntax is rejected during the code generation phase, '
'causing\n'
'less detailed error messages.\n'
'\n'
'Although the definition of assignment implies that overlaps '
'between\n'
'the left-hand side and the right-hand side are ‘simultaneous’ '
'(for\n'
'example "a, b = b, a" swaps two variables), overlaps *within* '
'the\n'
'collection of assigned-to variables occur left-to-right, '
'sometimes\n'
'resulting in confusion. For instance, the following program '
'prints\n'
'"[0, 2]":\n'
'\n'
' x = [0, 1]\n'
' i = 0\n'
' i, x[i] = 1, 2 # i is updated, then x[i] is '
'updated\n'
' print(x)\n'
'\n'
'See also:\n'
'\n'
' **PEP 3132** - Extended Iterable Unpacking\n'
' The specification for the "*target" feature.\n'
'\n'
'\n'
'Augmented assignment statements\n'
'===============================\n'
'\n'
'Augmented assignment is the combination, in a single '
'statement, of a\n'
'binary operation and an assignment statement:\n'
'\n'
' augmented_assignment_stmt ::= augtarget augop '
'(expression_list | yield_expression)\n'
' augtarget ::= identifier | attributeref | '
'subscription | slicing\n'
' augop ::= "+=" | "-=" | "*=" | "@=" | '
'"/=" | "//=" | "%=" | "**="\n'
' | ">>=" | "<<=" | "&=" | "^=" | "|="\n'
'\n'
'(See section Primaries for the syntax definitions of the last '
'three\n'
'symbols.)\n'
'\n'
'An augmented assignment evaluates the target (which, unlike '
'normal\n'
'assignment statements, cannot be an unpacking) and the '
'expression\n'
'list, performs the binary operation specific to the type of '
'assignment\n'
'on the two operands, and assigns the result to the original '
'target.\n'
'The target is only evaluated once.\n'
'\n'
'An augmented assignment expression like "x += 1" can be '
'rewritten as\n'
'"x = x + 1" to achieve a similar, but not exactly equal '
'effect. In the\n'
'augmented version, "x" is only evaluated once. Also, when '
'possible,\n'
'the actual operation is performed *in-place*, meaning that '
'rather than\n'
'creating a new object and assigning that to the target, the '
'old object\n'
'is modified instead.\n'
'\n'
'Unlike normal assignments, augmented assignments evaluate the '
'left-\n'
'hand side *before* evaluating the right-hand side. For '
'example, "a[i]\n'
'+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and '
'performs\n'
'the addition, and lastly, it writes the result back to '
'"a[i]".\n'
'\n'
'With the exception of assigning to tuples and multiple targets '
'in a\n'
'single statement, the assignment done by augmented assignment\n'
'statements is handled the same way as normal assignments. '
'Similarly,\n'
'with the exception of the possible *in-place* behavior, the '
'binary\n'
'operation performed by augmented assignment is the same as the '
'normal\n'
'binary operations.\n'
'\n'
'For targets which are attribute references, the same caveat '
'about\n'
'class and instance attributes applies as for regular '
'assignments.\n'
'\n'
'\n'
'Annotated assignment statements\n'
'===============================\n'
'\n'
'Annotation assignment is the combination, in a single '
'statement, of a\n'
'variable or attribute annotation and an optional assignment '
'statement:\n'
'\n'
' annotated_assignment_stmt ::= augtarget ":" expression ["=" '
'expression]\n'
'\n'
'The difference from normal Assignment statements is that only '
'single\n'
'target and only single right hand side value is allowed.\n'
'\n'
'For simple names as assignment targets, if in class or module '
'scope,\n'
'the annotations are evaluated and stored in a special class or '
'module\n'
'attribute "__annotations__" that is a dictionary mapping from '
'variable\n'
'names (mangled if private) to evaluated annotations. This '
'attribute is\n'
'writable and is automatically created at the start of class or '
'module\n'
'body execution, if annotations are found statically.\n'
'\n'
'For expressions as assignment targets, the annotations are '
'evaluated\n'
'if in class or module scope, but not stored.\n'
'\n'
'If a name is annotated in a function scope, then this name is '
'local\n'
'for that scope. Annotations are never evaluated and stored in '
'function\n'
'scopes.\n'
'\n'
'If the right hand side is present, an annotated assignment '
'performs\n'
'the actual assignment before evaluating annotations (where\n'
'applicable). If the right hand side is not present for an '
'expression\n'
'target, then the interpreter evaluates the target except for '
'the last\n'
'"__setitem__()" or "__setattr__()" call.\n'
'\n'
'See also: **PEP 526** - Variable and attribute annotation '
'syntax\n'
' **PEP 484** - Type hints\n',
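 # Sketch of the starred-target rule above:
 #   first, *rest, last = [1, 2, 3, 4]   # first == 1, rest == [2, 3], last == 4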
'atom-identifiers': 'Identifiers (Names)\n'
'*******************\n'
'\n'
'An identifier occurring as an atom is a name. See '
'section Identifiers\n'
'and keywords for lexical definition and section Naming '
'and binding for\n'
'documentation of naming and binding.\n'
'\n'
'When the name is bound to an object, evaluation of the '
'atom yields\n'
'that object. When a name is not bound, an attempt to '
'evaluate it\n'
'raises a "NameError" exception.\n'
'\n'
'**Private name mangling:** When an identifier that '
'textually occurs in\n'
'a class definition begins with two or more underscore '
'characters and\n'
'does not end in two or more underscores, it is '
'considered a *private\n'
'name* of that class. Private names are transformed to a '
'longer form\n'
'before code is generated for them. The transformation '
'inserts the\n'
'class name, with leading underscores removed and a '
'single underscore\n'
'inserted, in front of the name. For example, the '
'identifier "__spam"\n'
'occurring in a class named "Ham" will be transformed to '
'"_Ham__spam".\n'
'This transformation is independent of the syntactical '
'context in which\n'
'the identifier is used. If the transformed name is '
'extremely long\n'
'(longer than 255 characters), implementation defined '
'truncation may\n'
'happen. If the class name consists only of underscores, '
'no\n'
'transformation is done.\n',
'atom-literals': 'Literals\n'
'********\n'
'\n'
'Python supports string and bytes literals and various '
'numeric\n'
'literals:\n'
'\n'
' literal ::= stringliteral | bytesliteral\n'
' | integer | floatnumber | imagnumber\n'
'\n'
'Evaluation of a literal yields an object of the given type '
'(string,\n'
'bytes, integer, floating point number, complex number) with '
'the given\n'
'value. The value may be approximated in the case of '
'floating point\n'
'and imaginary (complex) literals. See section Literals for '
'details.\n'
'\n'
'All literals correspond to immutable data types, and hence '
'the\n'
'object’s identity is less important than its value. '
'Multiple\n'
'evaluations of literals with the same value (either the '
'same\n'
'occurrence in the program text or a different occurrence) '
'may obtain\n'
'the same object or a different object with the same '
'value.\n',
'attribute-access': 'Customizing attribute access\n'
'****************************\n'
'\n'
'The following methods can be defined to customize the '
'meaning of\n'
'attribute access (use of, assignment to, or deletion of '
'"x.name") for\n'
'class instances.\n'
'\n'
'object.__getattr__(self, name)\n'
'\n'
' Called when the default attribute access fails with '
'an\n'
' "AttributeError" (either "__getattribute__()" raises '
'an\n'
' "AttributeError" because *name* is not an instance '
'attribute or an\n'
' attribute in the class tree for "self"; or '
'"__get__()" of a *name*\n'
' property raises "AttributeError"). This method '
'should either\n'
' return the (computed) attribute value or raise an '
'"AttributeError"\n'
' exception.\n'
'\n'
' Note that if the attribute is found through the '
'normal mechanism,\n'
' "__getattr__()" is not called. (This is an '
'intentional asymmetry\n'
' between "__getattr__()" and "__setattr__()".) This is '
'done both for\n'
' efficiency reasons and because otherwise '
'"__getattr__()" would have\n'
' no way to access other attributes of the instance. '
'Note that at\n'
' least for instance variables, you can fake total '
'control by not\n'
' inserting any values in the instance attribute '
'dictionary (but\n'
' instead inserting them in another object). See the\n'
' "__getattribute__()" method below for a way to '
'actually get total\n'
' control over attribute access.\n'
'\n'
'object.__getattribute__(self, name)\n'
'\n'
' Called unconditionally to implement attribute '
'accesses for\n'
' instances of the class. If the class also defines '
'"__getattr__()",\n'
' the latter will not be called unless '
'"__getattribute__()" either\n'
' calls it explicitly or raises an "AttributeError". '
'This method\n'
' should return the (computed) attribute value or raise '
'an\n'
' "AttributeError" exception. In order to avoid '
'infinite recursion in\n'
' this method, its implementation should always call '
'the base class\n'
' method with the same name to access any attributes it '
'needs, for\n'
' example, "object.__getattribute__(self, name)".\n'
'\n'
' Note: This method may still be bypassed when looking '
'up special\n'
' methods as the result of implicit invocation via '
'language syntax\n'
' or built-in functions. See Special method lookup.\n'
'\n'
'object.__setattr__(self, name, value)\n'
'\n'
' Called when an attribute assignment is attempted. '
'This is called\n'
' instead of the normal mechanism (i.e. store the value '
'in the\n'
' instance dictionary). *name* is the attribute name, '
'*value* is the\n'
' value to be assigned to it.\n'
'\n'
' If "__setattr__()" wants to assign to an instance '
'attribute, it\n'
' should call the base class method with the same name, '
'for example,\n'
' "object.__setattr__(self, name, value)".\n'
'\n'
'object.__delattr__(self, name)\n'
'\n'
' Like "__setattr__()" but for attribute deletion '
'instead of\n'
' assignment. This should only be implemented if "del '
'obj.name" is\n'
' meaningful for the object.\n'
'\n'
'object.__dir__(self)\n'
'\n'
' Called when "dir()" is called on the object. A '
'sequence must be\n'
' returned. "dir()" converts the returned sequence to a '
'list and\n'
' sorts it.\n'
'\n'
'\n'
'Customizing module attribute access\n'
'===================================\n'
'\n'
'For a more fine grained customization of the module '
'behavior (setting\n'
'attributes, properties, etc.), one can set the '
'"__class__" attribute\n'
'of a module object to a subclass of "types.ModuleType". '
'For example:\n'
'\n'
' import sys\n'
' from types import ModuleType\n'
'\n'
' class VerboseModule(ModuleType):\n'
' def __repr__(self):\n'
" return f'Verbose {self.__name__}'\n"
'\n'
' def __setattr__(self, attr, value):\n'
" print(f'Setting {attr}...')\n"
' setattr(self, attr, value)\n'
'\n'
' sys.modules[__name__].__class__ = VerboseModule\n'
'\n'
'Note: Setting module "__class__" only affects lookups '
'made using the\n'
' attribute access syntax – directly accessing the '
'module globals\n'
' (whether by code within the module, or via a reference '
'to the\n'
' module’s globals dictionary) is unaffected.\n'
'\n'
'Changed in version 3.5: "__class__" module attribute is '
'now writable.\n'
'\n'
'\n'
'Implementing Descriptors\n'
'========================\n'
'\n'
'The following methods only apply when an instance of the '
'class\n'
'containing the method (a so-called *descriptor* class) '
'appears in an\n'
'*owner* class (the descriptor must be in either the '
'owner’s class\n'
'dictionary or in the class dictionary for one of its '
'parents). In the\n'
'examples below, “the attribute” refers to the attribute '
'whose name is\n'
'the key of the property in the owner class’ "__dict__".\n'
'\n'
'object.__get__(self, instance, owner)\n'
'\n'
' Called to get the attribute of the owner class (class '
'attribute\n'
' access) or of an instance of that class (instance '
'attribute\n'
' access). *owner* is always the owner class, while '
'*instance* is the\n'
' instance that the attribute was accessed through, or '
'"None" when\n'
' the attribute is accessed through the *owner*. This '
'method should\n'
' return the (computed) attribute value or raise an '
'"AttributeError"\n'
' exception.\n'
'\n'
'object.__set__(self, instance, value)\n'
'\n'
' Called to set the attribute on an instance *instance* '
'of the owner\n'
' class to a new value, *value*.\n'
'\n'
'object.__delete__(self, instance)\n'
'\n'
' Called to delete the attribute on an instance '
'*instance* of the\n'
' owner class.\n'
'\n'
'object.__set_name__(self, owner, name)\n'
'\n'
' Called at the time the owning class *owner* is '
'created. The\n'
' descriptor has been assigned to *name*.\n'
'\n'
' New in version 3.6.\n'
'\n'
'The attribute "__objclass__" is interpreted by the '
'"inspect" module as\n'
'specifying the class where this object was defined '
'(setting this\n'
'appropriately can assist in runtime introspection of '
'dynamic class\n'
'attributes). For callables, it may indicate that an '
'instance of the\n'
'given type (or a subclass) is expected or required as '
'the first\n'
'positional argument (for example, CPython sets this '
'attribute for\n'
'unbound methods that are implemented in C).\n'
'\n'
'\n'
'Invoking Descriptors\n'
'====================\n'
'\n'
'In general, a descriptor is an object attribute with '
'“binding\n'
'behavior”, one whose attribute access has been '
'overridden by methods\n'
'in the descriptor protocol: "__get__()", "__set__()", '
'and\n'
'"__delete__()". If any of those methods are defined for '
'an object, it\n'
'is said to be a descriptor.\n'
'\n'
'The default behavior for attribute access is to get, '
'set, or delete\n'
'the attribute from an object’s dictionary. For instance, '
'"a.x" has a\n'
'lookup chain starting with "a.__dict__[\'x\']", then\n'
'"type(a).__dict__[\'x\']", and continuing through the '
'base classes of\n'
'"type(a)" excluding metaclasses.\n'
'\n'
'However, if the looked-up value is an object defining '
'one of the\n'
'descriptor methods, then Python may override the default '
'behavior and\n'
'invoke the descriptor method instead. Where this occurs '
'in the\n'
'precedence chain depends on which descriptor methods '
'were defined and\n'
'how they were called.\n'
'\n'
'The starting point for descriptor invocation is a '
'binding, "a.x". How\n'
'the arguments are assembled depends on "a":\n'
'\n'
'Direct Call\n'
' The simplest and least common call is when user code '
'directly\n'
' invokes a descriptor method: "x.__get__(a)".\n'
'\n'
'Instance Binding\n'
' If binding to an object instance, "a.x" is '
'transformed into the\n'
' call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n'
'\n'
'Class Binding\n'
' If binding to a class, "A.x" is transformed into the '
'call:\n'
' "A.__dict__[\'x\'].__get__(None, A)".\n'
'\n'
'Super Binding\n'
' If "a" is an instance of "super", then the binding '
'"super(B,\n'
' obj).m()" searches "obj.__class__.__mro__" for the '
'base class "A"\n'
' immediately preceding "B" and then invokes the '
'descriptor with the\n'
' call: "A.__dict__[\'m\'].__get__(obj, '
'obj.__class__)".\n'
'\n'
'For instance bindings, the precedence of descriptor '
'invocation depends\n'
                     'on which descriptor methods are defined. A '
'descriptor can define\n'
'any combination of "__get__()", "__set__()" and '
'"__delete__()". If it\n'
'does not define "__get__()", then accessing the '
'attribute will return\n'
'the descriptor object itself unless there is a value in '
'the object’s\n'
'instance dictionary. If the descriptor defines '
'"__set__()" and/or\n'
'"__delete__()", it is a data descriptor; if it defines '
'neither, it is\n'
'a non-data descriptor. Normally, data descriptors '
'define both\n'
'"__get__()" and "__set__()", while non-data descriptors '
'have just the\n'
'"__get__()" method. Data descriptors with "__set__()" '
'and "__get__()"\n'
'defined always override a redefinition in an instance '
'dictionary. In\n'
'contrast, non-data descriptors can be overridden by '
'instances.\n'
'\n'
'Python methods (including "staticmethod()" and '
'"classmethod()") are\n'
'implemented as non-data descriptors. Accordingly, '
'instances can\n'
'redefine and override methods. This allows individual '
'instances to\n'
'acquire behaviors that differ from other instances of '
'the same class.\n'
'\n'
'The "property()" function is implemented as a data '
'descriptor.\n'
'Accordingly, instances cannot override the behavior of a '
'property.\n'
'\n'
'\n'
'__slots__\n'
'=========\n'
'\n'
'*__slots__* allow us to explicitly declare data members '
'(like\n'
'properties) and deny the creation of *__dict__* and '
'*__weakref__*\n'
'(unless explicitly declared in *__slots__* or available '
'in a parent.)\n'
'\n'
'The space saved over using *__dict__* can be '
'significant.\n'
'\n'
'object.__slots__\n'
'\n'
' This class variable can be assigned a string, '
'iterable, or sequence\n'
' of strings with variable names used by instances. '
'*__slots__*\n'
' reserves space for the declared variables and '
'prevents the\n'
' automatic creation of *__dict__* and *__weakref__* '
'for each\n'
' instance.\n'
'\n'
'\n'
'Notes on using *__slots__*\n'
'--------------------------\n'
'\n'
'* When inheriting from a class without *__slots__*, the '
'*__dict__*\n'
' and *__weakref__* attribute of the instances will '
'always be\n'
' accessible.\n'
'\n'
'* Without a *__dict__* variable, instances cannot be '
'assigned new\n'
' variables not listed in the *__slots__* definition. '
'Attempts to\n'
' assign to an unlisted variable name raises '
'"AttributeError". If\n'
' dynamic assignment of new variables is desired, then '
'add\n'
' "\'__dict__\'" to the sequence of strings in the '
'*__slots__*\n'
' declaration.\n'
'\n'
'* Without a *__weakref__* variable for each instance, '
'classes\n'
' defining *__slots__* do not support weak references to '
'its\n'
' instances. If weak reference support is needed, then '
'add\n'
' "\'__weakref__\'" to the sequence of strings in the '
'*__slots__*\n'
' declaration.\n'
'\n'
'* *__slots__* are implemented at the class level by '
'creating\n'
' descriptors (Implementing Descriptors) for each '
'variable name. As a\n'
' result, class attributes cannot be used to set default '
'values for\n'
' instance variables defined by *__slots__*; otherwise, '
'the class\n'
' attribute would overwrite the descriptor assignment.\n'
'\n'
'* The action of a *__slots__* declaration is not limited '
'to the\n'
' class where it is defined. *__slots__* declared in '
'parents are\n'
' available in child classes. However, child subclasses '
'will get a\n'
' *__dict__* and *__weakref__* unless they also define '
'*__slots__*\n'
' (which should only contain names of any *additional* '
'slots).\n'
'\n'
'* If a class defines a slot also defined in a base '
'class, the\n'
' instance variable defined by the base class slot is '
'inaccessible\n'
' (except by retrieving its descriptor directly from the '
'base class).\n'
' This renders the meaning of the program undefined. In '
'the future, a\n'
' check may be added to prevent this.\n'
'\n'
'* Nonempty *__slots__* does not work for classes derived '
'from\n'
' “variable-length” built-in types such as "int", '
'"bytes" and "tuple".\n'
'\n'
'* Any non-string iterable may be assigned to '
'*__slots__*. Mappings\n'
' may also be used; however, in the future, special '
'meaning may be\n'
' assigned to the values corresponding to each key.\n'
'\n'
'* *__class__* assignment works only if both classes have '
'the same\n'
' *__slots__*.\n'
'\n'
'* Multiple inheritance with multiple slotted parent '
'classes can be\n'
' used, but only one parent is allowed to have '
'attributes created by\n'
' slots (the other bases must have empty slot layouts) - '
'violations\n'
' raise "TypeError".\n',
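 # Sketch: a minimal non-data descriptor as described above:
 #   class Ten:
 #       def __get__(self, instance, owner):
 #           return 10
 #   class A:
 #       x = Ten()
 #   A().x   # -> 10, via type(a).__dict__['x'].__get__(a, type(a))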
'attribute-references': 'Attribute references\n'
'********************\n'
'\n'
'An attribute reference is a primary followed by a '
'period and a name:\n'
'\n'
' attributeref ::= primary "." identifier\n'
'\n'
'The primary must evaluate to an object of a type '
'that supports\n'
'attribute references, which most objects do. This '
'object is then\n'
'asked to produce the attribute whose name is the '
'identifier. This\n'
'production can be customized by overriding the '
'"__getattr__()" method.\n'
'If this attribute is not available, the exception '
'"AttributeError" is\n'
'raised. Otherwise, the type and value of the object '
'produced is\n'
'determined by the object. Multiple evaluations of '
'the same attribute\n'
'reference may yield different objects.\n',
'augassign': 'Augmented assignment statements\n'
'*******************************\n'
'\n'
'Augmented assignment is the combination, in a single statement, '
'of a\n'
'binary operation and an assignment statement:\n'
'\n'
' augmented_assignment_stmt ::= augtarget augop '
'(expression_list | yield_expression)\n'
' augtarget ::= identifier | attributeref | '
'subscription | slicing\n'
' augop ::= "+=" | "-=" | "*=" | "@=" | '
'"/=" | "//=" | "%=" | "**="\n'
' | ">>=" | "<<=" | "&=" | "^=" | "|="\n'
'\n'
'(See section Primaries for the syntax definitions of the last '
'three\n'
'symbols.)\n'
'\n'
'An augmented assignment evaluates the target (which, unlike '
'normal\n'
'assignment statements, cannot be an unpacking) and the '
'expression\n'
'list, performs the binary operation specific to the type of '
'assignment\n'
'on the two operands, and assigns the result to the original '
'target.\n'
'The target is only evaluated once.\n'
'\n'
'An augmented assignment expression like "x += 1" can be '
'rewritten as\n'
'"x = x + 1" to achieve a similar, but not exactly equal effect. '
'In the\n'
'augmented version, "x" is only evaluated once. Also, when '
'possible,\n'
'the actual operation is performed *in-place*, meaning that '
'rather than\n'
'creating a new object and assigning that to the target, the old '
'object\n'
'is modified instead.\n'
'\n'
'Unlike normal assignments, augmented assignments evaluate the '
'left-\n'
'hand side *before* evaluating the right-hand side. For '
'example, "a[i]\n'
'+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and '
'performs\n'
'the addition, and lastly, it writes the result back to "a[i]".\n'
'\n'
'With the exception of assigning to tuples and multiple targets '
'in a\n'
'single statement, the assignment done by augmented assignment\n'
'statements is handled the same way as normal assignments. '
'Similarly,\n'
'with the exception of the possible *in-place* behavior, the '
'binary\n'
'operation performed by augmented assignment is the same as the '
'normal\n'
'binary operations.\n'
'\n'
'For targets which are attribute references, the same caveat '
'about\n'
'class and instance attributes applies as for regular '
'assignments.\n',
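 # Sketch of the in-place behaviour described above:
 #   x = y = [1]; x += [2]       # mutates in place -> y == [1, 2]
 #   x = y = [1]; x = x + [2]    # rebinds x        -> y == [1]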
'binary': 'Binary arithmetic operations\n'
'****************************\n'
'\n'
'The binary arithmetic operations have the conventional priority\n'
'levels. Note that some of these operations also apply to certain '
'non-\n'
'numeric types. Apart from the power operator, there are only two\n'
'levels, one for multiplicative operators and one for additive\n'
'operators:\n'
'\n'
' m_expr ::= u_expr | m_expr "*" u_expr | m_expr "@" m_expr |\n'
' m_expr "//" u_expr| m_expr "/" u_expr |\n'
' m_expr "%" u_expr\n'
' a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n'
'\n'
'The "*" (multiplication) operator yields the product of its '
'arguments.\n'
'The arguments must either both be numbers, or one argument must be '
'an\n'
'integer and the other must be a sequence. In the former case, the\n'
'numbers are converted to a common type and then multiplied '
'together.\n'
'In the latter case, sequence repetition is performed; a negative\n'
'repetition factor yields an empty sequence.\n'
'\n'
'The "@" (at) operator is intended to be used for matrix\n'
'multiplication. No builtin Python types implement this operator.\n'
'\n'
'New in version 3.5.\n'
'\n'
'The "/" (division) and "//" (floor division) operators yield the\n'
'quotient of their arguments. The numeric arguments are first\n'
'converted to a common type. Division of integers yields a float, '
'while\n'
'floor division of integers results in an integer; the result is '
'that\n'
'of mathematical division with the ‘floor’ function applied to the\n'
'result. Division by zero raises the "ZeroDivisionError" '
'exception.\n'
'\n'
'The "%" (modulo) operator yields the remainder from the division '
'of\n'
'the first argument by the second. The numeric arguments are '
'first\n'
'converted to a common type. A zero right argument raises the\n'
'"ZeroDivisionError" exception. The arguments may be floating '
'point\n'
'numbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals '
'"4*0.7 +\n'
'0.34".) The modulo operator always yields a result with the same '
'sign\n'
'as its second operand (or zero); the absolute value of the result '
'is\n'
'strictly smaller than the absolute value of the second operand '
'[1].\n'
'\n'
'The floor division and modulo operators are connected by the '
'following\n'
'identity: "x == (x//y)*y + (x%y)". Floor division and modulo are '
'also\n'
'connected with the built-in function "divmod()": "divmod(x, y) ==\n'
'(x//y, x%y)". [2].\n'
'\n'
'In addition to performing the modulo operation on numbers, the '
'"%"\n'
'operator is also overloaded by string objects to perform '
'old-style\n'
'string formatting (also known as interpolation). The syntax for\n'
'string formatting is described in the Python Library Reference,\n'
'section printf-style String Formatting.\n'
'\n'
'The floor division operator, the modulo operator, and the '
'"divmod()"\n'
'function are not defined for complex numbers. Instead, convert to '
'a\n'
'floating point number using the "abs()" function if appropriate.\n'
'\n'
'The "+" (addition) operator yields the sum of its arguments. The\n'
'arguments must either both be numbers or both be sequences of the '
'same\n'
'type. In the former case, the numbers are converted to a common '
'type\n'
'and then added together. In the latter case, the sequences are\n'
'concatenated.\n'
'\n'
'The "-" (subtraction) operator yields the difference of its '
'arguments.\n'
'The numeric arguments are first converted to a common type.\n',
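 # Sketch of the sign rules above: -7 // 3 == -3 and -7 % 3 == 2, while
 # 7 % -3 == -2; in every case x == (x//y)*y + (x%y) holds.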
'bitwise': 'Binary bitwise operations\n'
'*************************\n'
'\n'
'Each of the three bitwise operations has a different priority '
'level:\n'
'\n'
' and_expr ::= shift_expr | and_expr "&" shift_expr\n'
' xor_expr ::= and_expr | xor_expr "^" and_expr\n'
' or_expr ::= xor_expr | or_expr "|" xor_expr\n'
'\n'
'The "&" operator yields the bitwise AND of its arguments, which '
'must\n'
'be integers.\n'
'\n'
'The "^" operator yields the bitwise XOR (exclusive OR) of its\n'
'arguments, which must be integers.\n'
'\n'
'The "|" operator yields the bitwise (inclusive) OR of its '
'arguments,\n'
'which must be integers.\n',
'bltin-code-objects': 'Code Objects\n'
'************\n'
'\n'
'Code objects are used by the implementation to '
'represent “pseudo-\n'
'compiled” executable Python code such as a function '
'body. They differ\n'
'from function objects because they don’t contain a '
'reference to their\n'
'global execution environment. Code objects are '
'returned by the built-\n'
'in "compile()" function and can be extracted from '
'function objects\n'
'through their "__code__" attribute. See also the '
'"code" module.\n'
'\n'
'A code object can be executed or evaluated by passing '
'it (instead of a\n'
'source string) to the "exec()" or "eval()" built-in '
'functions.\n'
'\n'
'See The standard type hierarchy for more '
'information.\n',
'bltin-ellipsis-object': 'The Ellipsis Object\n'
'*******************\n'
'\n'
'This object is commonly used by slicing (see '
'Slicings). It supports\n'
'no special operations. There is exactly one '
'ellipsis object, named\n'
'"Ellipsis" (a built-in name). "type(Ellipsis)()" '
'produces the\n'
'"Ellipsis" singleton.\n'
'\n'
'It is written as "Ellipsis" or "...".\n',
'bltin-null-object': 'The Null Object\n'
'***************\n'
'\n'
'This object is returned by functions that don’t '
'explicitly return a\n'
'value. It supports no special operations. There is '
'exactly one null\n'
'object, named "None" (a built-in name). "type(None)()" '
'produces the\n'
'same singleton.\n'
'\n'
'It is written as "None".\n',
'bltin-type-objects': 'Type Objects\n'
'************\n'
'\n'
'Type objects represent the various object types. An '
'object’s type is\n'
'accessed by the built-in function "type()". There are '
'no special\n'
'operations on types. The standard module "types" '
'defines names for\n'
'all standard built-in types.\n'
'\n'
'Types are written like this: "<class \'int\'>".\n',
'booleans': 'Boolean operations\n'
'******************\n'
'\n'
' or_test ::= and_test | or_test "or" and_test\n'
' and_test ::= not_test | and_test "and" not_test\n'
' not_test ::= comparison | "not" not_test\n'
'\n'
'In the context of Boolean operations, and also when expressions '
'are\n'
'used by control flow statements, the following values are '
'interpreted\n'
'as false: "False", "None", numeric zero of all types, and empty\n'
'strings and containers (including strings, tuples, lists,\n'
'dictionaries, sets and frozensets). All other values are '
'interpreted\n'
'as true. User-defined objects can customize their truth value '
'by\n'
'providing a "__bool__()" method.\n'
'\n'
'The operator "not" yields "True" if its argument is false, '
'"False"\n'
'otherwise.\n'
'\n'
'The expression "x and y" first evaluates *x*; if *x* is false, '
'its\n'
'value is returned; otherwise, *y* is evaluated and the resulting '
'value\n'
'is returned.\n'
'\n'
'The expression "x or y" first evaluates *x*; if *x* is true, its '
'value\n'
'is returned; otherwise, *y* is evaluated and the resulting value '
'is\n'
'returned.\n'
'\n'
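'A brief illustrative session (an editorial addition, not part of\n'
'the original reference text):\n'
'\n'
'   >>> "" or "default"    # empty string is false, so "default"\n'
"   'default'\n"
'   >>> 3 and 5            # 3 is true, so the second operand\n'
'   5\n'
'   >>> not ""             # "not" always returns a boolean\n'
'   True\n'
'\n'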
'(Note that neither "and" nor "or" restrict the value and type '
'they\n'
'return to "False" and "True", but rather return the last '
'evaluated\n'
'argument. This is sometimes useful, e.g., if "s" is a string '
'that\n'
'should be replaced by a default value if it is empty, the '
'expression\n'
'"s or \'foo\'" yields the desired value. Because "not" has to '
'create a\n'
'new value, it returns a boolean value regardless of the type of '
'its\n'
'argument (for example, "not \'foo\'" produces "False" rather '
'than "\'\'".)\n',
'break': 'The "break" statement\n'
'*********************\n'
'\n'
' break_stmt ::= "break"\n'
'\n'
'"break" may only occur syntactically nested in a "for" or "while"\n'
'loop, but not nested in a function or class definition within that\n'
'loop.\n'
'\n'
'It terminates the nearest enclosing loop, skipping the optional '
'"else"\n'
'clause if the loop has one.\n'
'\n'
'If a "for" loop is terminated by "break", the loop control target\n'
'keeps its current value.\n'
'\n'
'When "break" passes control out of a "try" statement with a '
'"finally"\n'
'clause, that "finally" clause is executed before really leaving '
'the\n'
'loop.\n',
'callable-types': 'Emulating callable objects\n'
'**************************\n'
'\n'
'object.__call__(self[, args...])\n'
'\n'
' Called when the instance is “called” as a function; if '
'this method\n'
' is defined, "x(arg1, arg2, ...)" is a shorthand for\n'
' "x.__call__(arg1, arg2, ...)".\n',
'calls': 'Calls\n'
'*****\n'
'\n'
'A call calls a callable object (e.g., a *function*) with a '
'possibly\n'
'empty series of *arguments*:\n'
'\n'
' call ::= primary "(" [argument_list [","] | '
'comprehension] ")"\n'
' argument_list ::= positional_arguments ["," '
'starred_and_keywords]\n'
' ["," keywords_arguments]\n'
' | starred_and_keywords ["," '
'keywords_arguments]\n'
' | keywords_arguments\n'
' positional_arguments ::= ["*"] expression ("," ["*"] '
'expression)*\n'
' starred_and_keywords ::= ("*" expression | keyword_item)\n'
' ("," "*" expression | "," '
'keyword_item)*\n'
' keywords_arguments ::= (keyword_item | "**" expression)\n'
' ("," keyword_item | "," "**" '
'expression)*\n'
' keyword_item ::= identifier "=" expression\n'
'\n'
'An optional trailing comma may be present after the positional and\n'
'keyword arguments but does not affect the semantics.\n'
'\n'
'The primary must evaluate to a callable object (user-defined\n'
'functions, built-in functions, methods of built-in objects, class\n'
'objects, methods of class instances, and all objects having a\n'
'"__call__()" method are callable). All argument expressions are\n'
'evaluated before the call is attempted. Please refer to section\n'
'Function definitions for the syntax of formal *parameter* lists.\n'
'\n'
'If keyword arguments are present, they are first converted to\n'
'positional arguments, as follows. First, a list of unfilled slots '
'is\n'
'created for the formal parameters. If there are N positional\n'
'arguments, they are placed in the first N slots. Next, for each\n'
'keyword argument, the identifier is used to determine the\n'
'corresponding slot (if the identifier is the same as the first '
'formal\n'
'parameter name, the first slot is used, and so on). If the slot '
'is\n'
'already filled, a "TypeError" exception is raised. Otherwise, the\n'
'value of the argument is placed in the slot, filling it (even if '
'the\n'
'expression is "None", it fills the slot). When all arguments have\n'
'been processed, the slots that are still unfilled are filled with '
'the\n'
'corresponding default value from the function definition. '
'(Default\n'
'values are calculated, once, when the function is defined; thus, a\n'
'mutable object such as a list or dictionary used as default value '
'will\n'
'be shared by all calls that don’t specify an argument value for '
'the\n'
'corresponding slot; this should usually be avoided.) If there are '
'any\n'
'unfilled slots for which no default value is specified, a '
'"TypeError"\n'
'exception is raised. Otherwise, the list of filled slots is used '
'as\n'
'the argument list for the call.\n'
'\n'
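'The shared-mutable-default behavior mentioned above can be seen\n'
'directly (an editorial sketch; the names are illustrative):\n'
'\n'
'   >>> def append_to(item, bucket=[]):\n'
'   ...     bucket.append(item)    # mutates the one shared default\n'
'   ...     return bucket\n'
'   ...\n'
'   >>> append_to(1)\n'
'   [1]\n'
'   >>> append_to(2)    # the list from the previous call persists\n'
'   [1, 2]\n'
'\n'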
'**CPython implementation detail:** An implementation may provide\n'
'built-in functions whose positional parameters do not have names, '
'even\n'
'if they are ‘named’ for the purpose of documentation, and which\n'
'therefore cannot be supplied by keyword. In CPython, this is the '
'case\n'
'for functions implemented in C that use "PyArg_ParseTuple()" to '
'parse\n'
'their arguments.\n'
'\n'
'If there are more positional arguments than there are formal '
'parameter\n'
'slots, a "TypeError" exception is raised, unless a formal '
'parameter\n'
'using the syntax "*identifier" is present; in this case, that '
'formal\n'
'parameter receives a tuple containing the excess positional '
'arguments\n'
'(or an empty tuple if there were no excess positional arguments).\n'
'\n'
'If any keyword argument does not correspond to a formal parameter\n'
'name, a "TypeError" exception is raised, unless a formal parameter\n'
'using the syntax "**identifier" is present; in this case, that '
'formal\n'
'parameter receives a dictionary containing the excess keyword\n'
'arguments (using the keywords as keys and the argument values as\n'
'corresponding values), or a (new) empty dictionary if there were '
'no\n'
'excess keyword arguments.\n'
'\n'
'If the syntax "*expression" appears in the function call, '
'"expression"\n'
'must evaluate to an *iterable*. Elements from these iterables are\n'
'treated as if they were additional positional arguments. For the '
'call\n'
'"f(x1, x2, *y, x3, x4)", if *y* evaluates to a sequence *y1*, …, '
'*yM*,\n'
'this is equivalent to a call with M+4 positional arguments *x1*, '
'*x2*,\n'
'*y1*, …, *yM*, *x3*, *x4*.\n'
'\n'
'A consequence of this is that although the "*expression" syntax '
'may\n'
'appear *after* explicit keyword arguments, it is processed '
'*before*\n'
'the keyword arguments (and any "**expression" arguments – see '
'below).\n'
'So:\n'
'\n'
' >>> def f(a, b):\n'
' ... print(a, b)\n'
' ...\n'
' >>> f(b=1, *(2,))\n'
' 2 1\n'
' >>> f(a=1, *(2,))\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 1, in <module>\n'
" TypeError: f() got multiple values for keyword argument 'a'\n"
' >>> f(1, *(2,))\n'
' 1 2\n'
'\n'
'It is unusual for both keyword arguments and the "*expression" '
'syntax\n'
'to be used in the same call, so in practice this confusion does '
'not\n'
'arise.\n'
'\n'
'If the syntax "**expression" appears in the function call,\n'
'"expression" must evaluate to a *mapping*, the contents of which '
'are\n'
'treated as additional keyword arguments. If a keyword is already\n'
'present (as an explicit keyword argument, or from another '
'unpacking),\n'
'a "TypeError" exception is raised.\n'
'\n'
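'For instance (an editorial sketch; the names are illustrative):\n'
'\n'
'   >>> def g(a, b):\n'
'   ...     print(a, b)\n'
'   ...\n'
'   >>> g(**{"a": 1, "b": 2})    # mapping unpacked as keywords\n'
'   1 2\n'
'\n'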
'Formal parameters using the syntax "*identifier" or "**identifier"\n'
'cannot be used as positional argument slots or as keyword argument\n'
'names.\n'
'\n'
'Changed in version 3.5: Function calls accept any number of "*" '
'and\n'
'"**" unpackings, positional arguments may follow iterable '
'unpackings\n'
'("*"), and keyword arguments may follow dictionary unpackings '
'("**").\n'
'Originally proposed by **PEP 448**.\n'
'\n'
'A call always returns some value, possibly "None", unless it raises '
'an\n'
'exception. How this value is computed depends on the type of the\n'
'callable object.\n'
'\n'
'If it is—\n'
'\n'
'a user-defined function:\n'
' The code block for the function is executed, passing it the\n'
' argument list. The first thing the code block will do is bind '
'the\n'
' formal parameters to the arguments; this is described in '
'section\n'
' Function definitions. When the code block executes a "return"\n'
' statement, this specifies the return value of the function '
'call.\n'
'\n'
'a built-in function or method:\n'
' The result is up to the interpreter; see Built-in Functions for '
'the\n'
' descriptions of built-in functions and methods.\n'
'\n'
'a class object:\n'
' A new instance of that class is returned.\n'
'\n'
'a class instance method:\n'
' The corresponding user-defined function is called, with an '
'argument\n'
' list that is one longer than the argument list of the call: the\n'
' instance becomes the first argument.\n'
'\n'
'a class instance:\n'
' The class must define a "__call__()" method; the effect is then '
'the\n'
' same as if that method was called.\n',
'class': 'Class definitions\n'
'*****************\n'
'\n'
'A class definition defines a class object (see section The '
'standard\n'
'type hierarchy):\n'
'\n'
' classdef ::= [decorators] "class" classname [inheritance] ":" '
'suite\n'
' inheritance ::= "(" [argument_list] ")"\n'
' classname ::= identifier\n'
'\n'
'A class definition is an executable statement. The inheritance '
'list\n'
'usually gives a list of base classes (see Metaclasses for more\n'
'advanced uses), so each item in the list should evaluate to a '
'class\n'
'object which allows subclassing. Classes without an inheritance '
'list\n'
'inherit, by default, from the base class "object"; hence,\n'
'\n'
' class Foo:\n'
' pass\n'
'\n'
'is equivalent to\n'
'\n'
' class Foo(object):\n'
' pass\n'
'\n'
'The class’s suite is then executed in a new execution frame (see\n'
'Naming and binding), using a newly created local namespace and the\n'
'original global namespace. (Usually, the suite contains mostly\n'
'function definitions.) When the class’s suite finishes execution, '
'its\n'
'execution frame is discarded but its local namespace is saved. [4] '
'A\n'
'class object is then created using the inheritance list for the '
'base\n'
'classes and the saved local namespace for the attribute '
'dictionary.\n'
'The class name is bound to this class object in the original local\n'
'namespace.\n'
'\n'
'The order in which attributes are defined in the class body is\n'
'preserved in the new class’s "__dict__". Note that this is '
'reliable\n'
'only right after the class is created and only for classes that '
'were\n'
'defined using the definition syntax.\n'
'\n'
'Class creation can be customized heavily using metaclasses.\n'
'\n'
'Classes can also be decorated: just like when decorating '
'functions,\n'
'\n'
' @f1(arg)\n'
' @f2\n'
' class Foo: pass\n'
'\n'
'is roughly equivalent to\n'
'\n'
' class Foo: pass\n'
' Foo = f1(arg)(f2(Foo))\n'
'\n'
'The evaluation rules for the decorator expressions are the same as '
'for\n'
'function decorators. The result is then bound to the class name.\n'
'\n'
'**Programmer’s note:** Variables defined in the class definition '
'are\n'
'class attributes; they are shared by instances. Instance '
'attributes\n'
'can be set in a method with "self.name = value". Both class and\n'
'instance attributes are accessible through the notation '
'“"self.name"”,\n'
'and an instance attribute hides a class attribute with the same '
'name\n'
'when accessed in this way. Class attributes can be used as '
'defaults\n'
'for instance attributes, but using mutable values there can lead '
'to\n'
'unexpected results. Descriptors can be used to create instance\n'
'variables with different implementation details.\n'
'\n'
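'A minimal sketch of the shared-attribute pitfall (an editorial\n'
'addition; the class name is illustrative):\n'
'\n'
'   >>> class C:\n'
'   ...     shared = []    # class attribute, one list for all\n'
'   ...\n'
'   >>> a, b = C(), C()\n'
'   >>> a.shared.append(1)\n'
'   >>> b.shared           # visible through every instance\n'
'   [1]\n'
'\n'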
'See also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n'
' Class Decorators\n',
'comparisons': 'Comparisons\n'
'***********\n'
'\n'
'Unlike C, all comparison operations in Python have the same '
'priority,\n'
'which is lower than that of any arithmetic, shifting or '
'bitwise\n'
'operation. Also unlike C, expressions like "a < b < c" have '
'the\n'
'interpretation that is conventional in mathematics:\n'
'\n'
' comparison ::= or_expr ( comp_operator or_expr )*\n'
' comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n'
' | "is" ["not"] | ["not"] "in"\n'
'\n'
'Comparisons yield boolean values: "True" or "False".\n'
'\n'
'Comparisons can be chained arbitrarily, e.g., "x < y <= z" '
'is\n'
'equivalent to "x < y and y <= z", except that "y" is '
'evaluated only\n'
'once (but in both cases "z" is not evaluated at all when "x < '
'y" is\n'
'found to be false).\n'
'\n'
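'For example (an editorial doctest sketch):\n'
'\n'
'   >>> 1 < 2 <= 3\n'
'   True\n'
'   >>> x = 5\n'
'   >>> 1 < x > 3    # legal, though perhaps not pretty\n'
'   True\n'
'\n'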
'Formally, if *a*, *b*, *c*, …, *y*, *z* are expressions and '
'*op1*,\n'
'*op2*, …, *opN* are comparison operators, then "a op1 b op2 c '
'... y\n'
'opN z" is equivalent to "a op1 b and b op2 c and ... y opN '
'z", except\n'
'that each expression is evaluated at most once.\n'
'\n'
'Note that "a op1 b op2 c" doesn’t imply any kind of '
'comparison between\n'
'*a* and *c*, so that, e.g., "x < y > z" is perfectly legal '
'(though\n'
'perhaps not pretty).\n'
'\n'
'\n'
'Value comparisons\n'
'=================\n'
'\n'
'The operators "<", ">", "==", ">=", "<=", and "!=" compare '
'the values\n'
'of two objects. The objects do not need to have the same '
'type.\n'
'\n'
'Chapter Objects, values and types states that objects have a '
'value (in\n'
'addition to type and identity). The value of an object is a '
'rather\n'
'abstract notion in Python: For example, there is no canonical '
'access\n'
'method for an object’s value. Also, there is no requirement '
'that the\n'
'value of an object should be constructed in a particular way, '
'e.g.\n'
'comprised of all its data attributes. Comparison operators '
'implement a\n'
'particular notion of what the value of an object is. One can '
'think of\n'
'them as defining the value of an object indirectly, by means '
'of their\n'
'comparison implementation.\n'
'\n'
'Because all types are (direct or indirect) subtypes of '
'"object", they\n'
'inherit the default comparison behavior from "object". Types '
'can\n'
'customize their comparison behavior by implementing *rich '
'comparison\n'
'methods* like "__lt__()", described in Basic customization.\n'
'\n'
'The default behavior for equality comparison ("==" and "!=") '
'is based\n'
'on the identity of the objects. Hence, equality comparison '
'of\n'
'instances with the same identity results in equality, and '
'equality\n'
'comparison of instances with different identities results in\n'
'inequality. A motivation for this default behavior is the '
'desire that\n'
'all objects should be reflexive (i.e. "x is y" implies "x == '
'y").\n'
'\n'
'A default order comparison ("<", ">", "<=", and ">=") is not '
'provided;\n'
'an attempt raises "TypeError". A motivation for this default '
'behavior\n'
'is the lack of a similar invariant as for equality.\n'
'\n'
'The behavior of the default equality comparison, that '
'instances with\n'
'different identities are always unequal, may be in contrast '
'to what\n'
'types will need that have a sensible definition of object '
'value and\n'
'value-based equality. Such types will need to customize '
'their\n'
'comparison behavior, and in fact, a number of built-in types '
'have done\n'
'that.\n'
'\n'
'The following list describes the comparison behavior of the '
'most\n'
'important built-in types.\n'
'\n'
'* Numbers of built-in numeric types (Numeric Types — int, '
'float,\n'
' complex) and of the standard library types '
'"fractions.Fraction" and\n'
' "decimal.Decimal" can be compared within and across their '
'types,\n'
' with the restriction that complex numbers do not support '
'order\n'
' comparison. Within the limits of the types involved, they '
'compare\n'
' mathematically (algorithmically) correct without loss of '
'precision.\n'
'\n'
' The not-a-number values "float(\'NaN\')" and '
'"Decimal(\'NaN\')" are\n'
' special. They are identical to themselves ("x is x" is '
'true) but\n'
' are not equal to themselves ("x == x" is false). '
'Additionally,\n'
' comparing any number to a not-a-number value will return '
'"False".\n'
' For example, both "3 < float(\'NaN\')" and "float(\'NaN\') '
'< 3" will\n'
' return "False".\n'
'\n'
'* Binary sequences (instances of "bytes" or "bytearray") can '
'be\n'
' compared within and across their types. They compare\n'
' lexicographically using the numeric values of their '
'elements.\n'
'\n'
'* Strings (instances of "str") compare lexicographically '
'using the\n'
' numerical Unicode code points (the result of the built-in '
'function\n'
' "ord()") of their characters. [3]\n'
'\n'
' Strings and binary sequences cannot be directly compared.\n'
'\n'
'* Sequences (instances of "tuple", "list", or "range") can '
'be\n'
' compared only within each of their types, with the '
'restriction that\n'
' ranges do not support order comparison. Equality '
'comparison across\n'
' these types results in inequality, and ordering comparison '
'across\n'
' these types raises "TypeError".\n'
'\n'
' Sequences compare lexicographically using comparison of\n'
' corresponding elements, whereby reflexivity of the elements '
'is\n'
' enforced.\n'
'\n'
' In enforcing reflexivity of elements, the comparison of '
'collections\n'
' assumes that for a collection element "x", "x == x" is '
'always true.\n'
' Based on that assumption, element identity is compared '
'first, and\n'
' element comparison is performed only for distinct '
'elements. This\n'
' approach yields the same result as a strict element '
'comparison\n'
' would, if the compared elements are reflexive. For '
'non-reflexive\n'
' elements, the result is different than for strict element\n'
' comparison, and may be surprising: The non-reflexive '
'not-a-number\n'
' values for example result in the following comparison '
'behavior when\n'
' used in a list:\n'
'\n'
" >>> nan = float('NaN')\n"
' >>> nan is nan\n'
' True\n'
' >>> nan == nan\n'
' False <-- the defined non-reflexive '
'behavior of NaN\n'
' >>> [nan] == [nan]\n'
' True <-- list enforces reflexivity and '
'tests identity first\n'
'\n'
' Lexicographical comparison between built-in collections '
'works as\n'
' follows:\n'
'\n'
' * For two collections to compare equal, they must be of the '
'same\n'
' type, have the same length, and each pair of '
'corresponding\n'
' elements must compare equal (for example, "[1,2] == '
'(1,2)" is\n'
' false because the type is not the same).\n'
'\n'
' * Collections that support order comparison are ordered the '
'same\n'
' as their first unequal elements (for example, "[1,2,x] <= '
'[1,2,y]"\n'
' has the same value as "x <= y"). If a corresponding '
'element does\n'
' not exist, the shorter collection is ordered first (for '
'example,\n'
' "[1,2] < [1,2,3]" is true).\n'
'\n'
'* Mappings (instances of "dict") compare equal if and only if '
'they\n'
' have equal *(key, value)* pairs. Equality comparison of the '
'keys and\n'
' values enforces reflexivity.\n'
'\n'
' Order comparisons ("<", ">", "<=", and ">=") raise '
'"TypeError".\n'
'\n'
'* Sets (instances of "set" or "frozenset") can be compared '
'within\n'
' and across their types.\n'
'\n'
' They define order comparison operators to mean subset and '
'superset\n'
' tests. Those relations do not define total orderings (for '
'example,\n'
' the two sets "{1,2}" and "{2,3}" are not equal, nor subsets '
'of one\n'
' another, nor supersets of one another). Accordingly, sets '
'are not\n'
' appropriate arguments for functions which depend on total '
'ordering\n'
' (for example, "min()", "max()", and "sorted()" produce '
'undefined\n'
' results given a list of sets as inputs).\n'
'\n'
' Comparison of sets enforces reflexivity of its elements.\n'
'\n'
'* Most other built-in types have no comparison methods '
'implemented,\n'
' so they inherit the default comparison behavior.\n'
'\n'
'User-defined classes that customize their comparison behavior '
'should\n'
'follow some consistency rules, if possible:\n'
'\n'
'* Equality comparison should be reflexive. In other words, '
'identical\n'
' objects should compare equal:\n'
'\n'
' "x is y" implies "x == y"\n'
'\n'
'* Comparison should be symmetric. In other words, the '
'following\n'
' expressions should have the same result:\n'
'\n'
' "x == y" and "y == x"\n'
'\n'
' "x != y" and "y != x"\n'
'\n'
' "x < y" and "y > x"\n'
'\n'
' "x <= y" and "y >= x"\n'
'\n'
'* Comparison should be transitive. The following '
'(non-exhaustive)\n'
' examples illustrate that:\n'
'\n'
' "x > y and y > z" implies "x > z"\n'
'\n'
' "x < y and y <= z" implies "x < z"\n'
'\n'
'* Inverse comparison should result in the boolean negation. '
'In other\n'
' words, the following expressions should have the same '
'result:\n'
'\n'
' "x == y" and "not x != y"\n'
'\n'
' "x < y" and "not x >= y" (for total ordering)\n'
'\n'
' "x > y" and "not x <= y" (for total ordering)\n'
'\n'
' The last two expressions apply to totally ordered '
'collections (e.g.\n'
' to sequences, but not to sets or mappings). See also the\n'
' "total_ordering()" decorator.\n'
'\n'
'* The "hash()" result should be consistent with equality. '
'Objects\n'
' that are equal should either have the same hash value, or '
'be marked\n'
' as unhashable.\n'
'\n'
'Python does not enforce these consistency rules. In fact, '
'the\n'
'not-a-number values are an example for not following these '
'rules.\n'
'\n'
'\n'
'Membership test operations\n'
'==========================\n'
'\n'
'The operators "in" and "not in" test for membership. "x in '
's"\n'
'evaluates to "True" if *x* is a member of *s*, and "False" '
'otherwise.\n'
'"x not in s" returns the negation of "x in s". All built-in '
'sequences\n'
'and set types support this as well as dictionaries, for which '
'"in" tests\n'
'whether the dictionary has a given key. For container types '
'such as\n'
'list, tuple, set, frozenset, dict, or collections.deque, the\n'
'expression "x in y" is equivalent to "any(x is e or x == e '
'for e in\n'
'y)".\n'
'\n'
'For the string and bytes types, "x in y" is "True" if and '
'only if *x*\n'
'is a substring of *y*. An equivalent test is "y.find(x) != '
'-1".\n'
'Empty strings are always considered to be a substring of any '
'other\n'
'string, so """ in "abc"" will return "True".\n'
'\n'
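'A few illustrative membership tests (an editorial addition, not\n'
'part of the original reference text):\n'
'\n'
'   >>> 2 in [1, 2, 3]\n'
'   True\n'
'   >>> "key" in {"key": "value"}    # dictionaries test their keys\n'
'   True\n'
'   >>> "" in "abc"                  # empty string is a substring\n'
'   True\n'
'\n'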
'For user-defined classes which define the "__contains__()" '
'method, "x\n'
'in y" returns "True" if "y.__contains__(x)" returns a true '
'value, and\n'
'"False" otherwise.\n'
'\n'
'For user-defined classes which do not define "__contains__()" '
'but do\n'
'define "__iter__()", "x in y" is "True" if some value "z" '
'with "x ==\n'
'z" is produced while iterating over "y". If an exception is '
'raised\n'
'during the iteration, it is as if "in" raised that '
'exception.\n'
'\n'
'Lastly, the old-style iteration protocol is tried: if a class '
'defines\n'
'"__getitem__()", "x in y" is "True" if and only if there is a '
'non-\n'
'negative integer index *i* such that "x == y[i]", and all '
'lower\n'
'integer indices do not raise "IndexError" exception. (If any '
'other\n'
'exception is raised, it is as if "in" raised that '
'exception).\n'
'\n'
'The operator "not in" is defined to have the inverse true '
'value of\n'
'"in".\n'
'\n'
'\n'
'Identity comparisons\n'
'====================\n'
'\n'
'The operators "is" and "is not" test for object identity: "x '
'is y" is\n'
'true if and only if *x* and *y* are the same object. Object '
'identity\n'
'is determined using the "id()" function. "x is not y" yields '
'the\n'
'inverse truth value. [4]\n',
'compound': 'Compound statements\n'
'*******************\n'
'\n'
'Compound statements contain (groups of) other statements; they '
'affect\n'
'or control the execution of those other statements in some way. '
'In\n'
'general, compound statements span multiple lines, although in '
'simple\n'
'incarnations a whole compound statement may be contained in one '
'line.\n'
'\n'
'The "if", "while" and "for" statements implement traditional '
'control\n'
'flow constructs. "try" specifies exception handlers and/or '
'cleanup\n'
'code for a group of statements, while the "with" statement '
'allows the\n'
'execution of initialization and finalization code around a block '
'of\n'
'code. Function and class definitions are also syntactically '
'compound\n'
'statements.\n'
'\n'
'A compound statement consists of one or more ‘clauses.’ A '
'clause\n'
'consists of a header and a ‘suite.’ The clause headers of a\n'
'particular compound statement are all at the same indentation '
'level.\n'
'Each clause header begins with a uniquely identifying keyword '
'and ends\n'
'with a colon. A suite is a group of statements controlled by a\n'
'clause. A suite can be one or more semicolon-separated simple\n'
'statements on the same line as the header, following the '
'header’s\n'
'colon, or it can be one or more indented statements on '
'subsequent\n'
'lines. Only the latter form of a suite can contain nested '
'compound\n'
'statements; the following is illegal, mostly because it wouldn’t '
'be\n'
'clear to which "if" clause a following "else" clause would '
'belong:\n'
'\n'
' if test1: if test2: print(x)\n'
'\n'
'Also note that the semicolon binds tighter than the colon in '
'this\n'
'context, so that in the following example, either all or none of '
'the\n'
'"print()" calls are executed:\n'
'\n'
' if x < y < z: print(x); print(y); print(z)\n'
'\n'
'Summarizing:\n'
'\n'
' compound_stmt ::= if_stmt\n'
' | while_stmt\n'
' | for_stmt\n'
' | try_stmt\n'
' | with_stmt\n'
' | funcdef\n'
' | classdef\n'
' | async_with_stmt\n'
' | async_for_stmt\n'
' | async_funcdef\n'
' suite ::= stmt_list NEWLINE | NEWLINE INDENT '
'statement+ DEDENT\n'
' statement ::= stmt_list NEWLINE | compound_stmt\n'
' stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n'
'\n'
'Note that statements always end in a "NEWLINE" possibly followed '
'by a\n'
'"DEDENT". Also note that optional continuation clauses always '
'begin\n'
'with a keyword that cannot start a statement, thus there are no\n'
'ambiguities (the ‘dangling "else"’ problem is solved in Python '
'by\n'
'requiring nested "if" statements to be indented).\n'
'\n'
'The formatting of the grammar rules in the following sections '
'places\n'
'each clause on a separate line for clarity.\n'
'\n'
'\n'
'The "if" statement\n'
'==================\n'
'\n'
'The "if" statement is used for conditional execution:\n'
'\n'
' if_stmt ::= "if" expression ":" suite\n'
' ( "elif" expression ":" suite )*\n'
' ["else" ":" suite]\n'
'\n'
'It selects exactly one of the suites by evaluating the '
'expressions one\n'
'by one until one is found to be true (see section Boolean '
'operations\n'
'for the definition of true and false); then that suite is '
'executed\n'
'(and no other part of the "if" statement is executed or '
'evaluated).\n'
'If all expressions are false, the suite of the "else" clause, '
'if\n'
'present, is executed.\n'
'\n'
'\n'
'The "while" statement\n'
'=====================\n'
'\n'
'The "while" statement is used for repeated execution as long as '
'an\n'
'expression is true:\n'
'\n'
' while_stmt ::= "while" expression ":" suite\n'
' ["else" ":" suite]\n'
'\n'
'This repeatedly tests the expression and, if it is true, '
'executes the\n'
'first suite; if the expression is false (which may be the first '
'time\n'
'it is tested) the suite of the "else" clause, if present, is '
'executed\n'
'and the loop terminates.\n'
'\n'
'A "break" statement executed in the first suite terminates the '
'loop\n'
'without executing the "else" clause’s suite. A "continue" '
'statement\n'
'executed in the first suite skips the rest of the suite and goes '
'back\n'
'to testing the expression.\n'
'\n'
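'For example (an editorial doctest sketch):\n'
'\n'
'   >>> n = 3\n'
'   >>> while n:\n'
'   ...     n -= 1\n'
'   ... else:\n'
'   ...     print("loop ended without break")\n'
'   ...\n'
'   loop ended without break\n'
'\n'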
'\n'
'The "for" statement\n'
'===================\n'
'\n'
'The "for" statement is used to iterate over the elements of a '
'sequence\n'
'(such as a string, tuple or list) or other iterable object:\n'
'\n'
' for_stmt ::= "for" target_list "in" expression_list ":" '
'suite\n'
' ["else" ":" suite]\n'
'\n'
'The expression list is evaluated once; it should yield an '
'iterable\n'
'object. An iterator is created for the result of the\n'
'"expression_list". The suite is then executed once for each '
'item\n'
'provided by the iterator, in the order returned by the '
'iterator. Each\n'
'item in turn is assigned to the target list using the standard '
'rules\n'
'for assignments (see Assignment statements), and then the suite '
'is\n'
'executed. When the items are exhausted (which is immediately '
'when the\n'
'sequence is empty or an iterator raises a "StopIteration" '
'exception),\n'
'the suite in the "else" clause, if present, is executed, and the '
'loop\n'
'terminates.\n'
'\n'
'A "break" statement executed in the first suite terminates the '
'loop\n'
'without executing the "else" clause’s suite. A "continue" '
'statement\n'
'executed in the first suite skips the rest of the suite and '
'continues\n'
'with the next item, or with the "else" clause if there is no '
'next\n'
'item.\n'
'\n'
'The for-loop makes assignments to the variable(s) in the target '
'list.\n'
'This overwrites all previous assignments to those variables '
'including\n'
'those made in the suite of the for-loop:\n'
'\n'
' for i in range(10):\n'
' print(i)\n'
' i = 5 # this will not affect the for-loop\n'
' # because i will be overwritten with '
'the next\n'
' # index in the range\n'
'\n'
'Names in the target list are not deleted when the loop is '
'finished,\n'
'but if the sequence is empty, they will not have been assigned '
'to at\n'
'all by the loop. Hint: the built-in function "range()" returns '
'an\n'
'iterator of integers suitable to emulate the effect of Pascal’s '
'"for i\n'
':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, '
'2]".\n'
'\n'
'Note: There is a subtlety when the sequence is being modified by '
'the\n'
' loop (this can only occur for mutable sequences, i.e. lists). '
'An\n'
' internal counter is used to keep track of which item is used '
'next,\n'
' and this is incremented on each iteration. When this counter '
'has\n'
' reached the length of the sequence the loop terminates. This '
'means\n'
' that if the suite deletes the current (or a previous) item '
'from the\n'
' sequence, the next item will be skipped (since it gets the '
'index of\n'
' the current item which has already been treated). Likewise, '
'if the\n'
' suite inserts an item in the sequence before the current item, '
'the\n'
' current item will be treated again the next time through the '
'loop.\n'
' This can lead to nasty bugs that can be avoided by making a\n'
' temporary copy using a slice of the whole sequence, e.g.,\n'
'\n'
' for x in a[:]:\n'
' if x < 0: a.remove(x)\n'
'\n'
'\n'
'The "try" statement\n'
'===================\n'
'\n'
'The "try" statement specifies exception handlers and/or cleanup '
'code\n'
'for a group of statements:\n'
'\n'
' try_stmt ::= try1_stmt | try2_stmt\n'
' try1_stmt ::= "try" ":" suite\n'
' ("except" [expression ["as" identifier]] ":" '
'suite)+\n'
' ["else" ":" suite]\n'
' ["finally" ":" suite]\n'
' try2_stmt ::= "try" ":" suite\n'
' "finally" ":" suite\n'
'\n'
'The "except" clause(s) specify one or more exception handlers. '
'When no\n'
'exception occurs in the "try" clause, no exception handler is\n'
'executed. When an exception occurs in the "try" suite, a search '
'for an\n'
'exception handler is started. This search inspects the except '
'clauses\n'
'in turn until one is found that matches the exception. An '
'expression-\n'
'less except clause, if present, must be last; it matches any\n'
'exception. For an except clause with an expression, that '
'expression\n'
'is evaluated, and the clause matches the exception if the '
'resulting\n'
'object is “compatible” with the exception. An object is '
'compatible\n'
'with an exception if it is the class or a base class of the '
'exception\n'
'object or a tuple containing an item compatible with the '
'exception.\n'
'\n'
'If no except clause matches the exception, the search for an '
'exception\n'
'handler continues in the surrounding code and on the invocation '
'stack.\n'
'[1]\n'
'\n'
'If the evaluation of an expression in the header of an except '
'clause\n'
'raises an exception, the original search for a handler is '
'canceled and\n'
'a search starts for the new exception in the surrounding code '
'and on\n'
'the call stack (it is treated as if the entire "try" statement '
'raised\n'
'the exception).\n'
'\n'
'When a matching except clause is found, the exception is '
'assigned to\n'
'the target specified after the "as" keyword in that except '
'clause, if\n'
'present, and the except clause’s suite is executed. All except\n'
'clauses must have an executable block. When the end of this '
'block is\n'
'reached, execution continues normally after the entire try '
'statement.\n'
'(This means that if two nested handlers exist for the same '
'exception,\n'
'and the exception occurs in the try clause of the inner handler, '
'the\n'
'outer handler will not handle the exception.)\n'
'\n'
'When an exception has been assigned using "as target", it is '
'cleared\n'
'at the end of the except clause. This is as if\n'
'\n'
' except E as N:\n'
' foo\n'
'\n'
'was translated to\n'
'\n'
' except E as N:\n'
' try:\n'
' foo\n'
' finally:\n'
' del N\n'
'\n'
'This means the exception must be assigned to a different name to '
'be\n'
'able to refer to it after the except clause. Exceptions are '
'cleared\n'
'because with the traceback attached to them, they form a '
'reference\n'
'cycle with the stack frame, keeping all locals in that frame '
'alive\n'
'until the next garbage collection occurs.\n'
'\n'
'Before an except clause’s suite is executed, details about the\n'
'exception are stored in the "sys" module and can be accessed '
'via\n'
'"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting '
'of the\n'
'exception class, the exception instance and a traceback object '
'(see\n'
'section The standard type hierarchy) identifying the point in '
'the\n'
'program where the exception occurred. "sys.exc_info()" values '
'are\n'
'restored to their previous values (before the call) when '
'returning\n'
'from a function that handled an exception.\n'
'\n'
'The optional "else" clause is executed if and when control flows '
'off\n'
'the end of the "try" clause. [2] Exceptions in the "else" clause '
'are\n'
'not handled by the preceding "except" clauses.\n'
'\n'
'If "finally" is present, it specifies a ‘cleanup’ handler. The '
'"try"\n'
'clause is executed, including any "except" and "else" clauses. '
'If an\n'
'exception occurs in any of the clauses and is not handled, the\n'
'exception is temporarily saved. The "finally" clause is '
'executed. If\n'
'there is a saved exception it is re-raised at the end of the '
'"finally"\n'
'clause. If the "finally" clause raises another exception, the '
'saved\n'
'exception is set as the context of the new exception. If the '
'"finally"\n'
'clause executes a "return" or "break" statement, the saved '
'exception\n'
'is discarded:\n'
'\n'
' >>> def f():\n'
' ... try:\n'
' ... 1/0\n'
' ... finally:\n'
' ... return 42\n'
' ...\n'
' >>> f()\n'
' 42\n'
'\n'
'The exception information is not available to the program '
'during\n'
'execution of the "finally" clause.\n'
'\n'
'When a "return", "break" or "continue" statement is executed in '
'the\n'
'"try" suite of a "try"…"finally" statement, the "finally" clause '
'is\n'
'also executed ‘on the way out.’ A "continue" statement is '
'illegal in\n'
'the "finally" clause. (The reason is a problem with the current\n'
'implementation — this restriction may be lifted in the future).\n'
'\n'
'The return value of a function is determined by the last '
'"return"\n'
'statement executed. Since the "finally" clause always executes, '
'a\n'
'"return" statement executed in the "finally" clause will always '
'be the\n'
'last one executed:\n'
'\n'
' >>> def foo():\n'
' ... try:\n'
" ... return 'try'\n"
' ... finally:\n'
" ... return 'finally'\n"
' ...\n'
' >>> foo()\n'
" 'finally'\n"
'\n'
'Additional information on exceptions can be found in section\n'
'Exceptions, and information on using the "raise" statement to '
'generate\n'
'exceptions may be found in section The raise statement.\n'
'\n'
'\n'
'The "with" statement\n'
'====================\n'
'\n'
'The "with" statement is used to wrap the execution of a block '
'with\n'
'methods defined by a context manager (see section With '
'Statement\n'
'Context Managers). This allows common "try"…"except"…"finally" '
'usage\n'
'patterns to be encapsulated for convenient reuse.\n'
'\n'
' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n'
' with_item ::= expression ["as" target]\n'
'\n'
'The execution of the "with" statement with one “item” proceeds '
'as\n'
'follows:\n'
'\n'
'1. The context expression (the expression given in the '
'"with_item")\n'
' is evaluated to obtain a context manager.\n'
'\n'
'2. The context manager’s "__exit__()" is loaded for later use.\n'
'\n'
'3. The context manager’s "__enter__()" method is invoked.\n'
'\n'
'4. If a target was included in the "with" statement, the return\n'
' value from "__enter__()" is assigned to it.\n'
'\n'
' Note: The "with" statement guarantees that if the '
'"__enter__()"\n'
' method returns without an error, then "__exit__()" will '
'always be\n'
' called. Thus, if an error occurs during the assignment to '
'the\n'
' target list, it will be treated the same as an error '
'occurring\n'
' within the suite would be. See step 6 below.\n'
'\n'
'5. The suite is executed.\n'
'\n'
'6. The context manager’s "__exit__()" method is invoked. If an\n'
' exception caused the suite to be exited, its type, value, '
'and\n'
' traceback are passed as arguments to "__exit__()". Otherwise, '
'three\n'
' "None" arguments are supplied.\n'
'\n'
' If the suite was exited due to an exception, and the return '
'value\n'
' from the "__exit__()" method was false, the exception is '
'reraised.\n'
' If the return value was true, the exception is suppressed, '
'and\n'
' execution continues with the statement following the "with"\n'
' statement.\n'
'\n'
' If the suite was exited for any reason other than an '
'exception, the\n'
' return value from "__exit__()" is ignored, and execution '
'proceeds\n'
' at the normal location for the kind of exit that was taken.\n'
'\n'
'With more than one item, the context managers are processed as '
'if\n'
'multiple "with" statements were nested:\n'
'\n'
' with A() as a, B() as b:\n'
' suite\n'
'\n'
'is equivalent to\n'
'\n'
' with A() as a:\n'
' with B() as b:\n'
' suite\n'
'\n'
'Changed in version 3.1: Support for multiple context '
'expressions.\n'
'\n'
'See also:\n'
'\n'
' **PEP 343** - The “with” statement\n'
' The specification, background, and examples for the Python '
'"with"\n'
' statement.\n'
'\n'
'\n'
'Function definitions\n'
'====================\n'
'\n'
'A function definition defines a user-defined function object '
'(see\n'
'section The standard type hierarchy):\n'
'\n'
' funcdef ::= [decorators] "def" funcname "(" '
'[parameter_list] ")" ["->" expression] ":" suite\n'
' decorators ::= decorator+\n'
' decorator ::= "@" dotted_name ["(" '
'[argument_list [","]] ")"] NEWLINE\n'
' dotted_name ::= identifier ("." identifier)*\n'
' parameter_list ::= defparameter ("," defparameter)* '
'["," [parameter_list_starargs]]\n'
' | parameter_list_starargs\n'
' parameter_list_starargs ::= "*" [parameter] ("," '
'defparameter)* ["," ["**" parameter [","]]]\n'
' | "**" parameter [","]\n'
' parameter ::= identifier [":" expression]\n'
' defparameter ::= parameter ["=" expression]\n'
' funcname ::= identifier\n'
'\n'
'A function definition is an executable statement. Its execution '
'binds\n'
'the function name in the current local namespace to a function '
'object\n'
'(a wrapper around the executable code for the function). This\n'
'function object contains a reference to the current global '
'namespace\n'
'as the global namespace to be used when the function is called.\n'
'\n'
'The function definition does not execute the function body; this '
'gets\n'
'executed only when the function is called. [3]\n'
'\n'
'A function definition may be wrapped by one or more *decorator*\n'
'expressions. Decorator expressions are evaluated when the '
'function is\n'
'defined, in the scope that contains the function definition. '
'The\n'
'result must be a callable, which is invoked with the function '
'object\n'
'as the only argument. The returned value is bound to the '
'function name\n'
'instead of the function object. Multiple decorators are applied '
'in\n'
'nested fashion. For example, the following code\n'
'\n'
' @f1(arg)\n'
' @f2\n'
' def func(): pass\n'
'\n'
'is roughly equivalent to\n'
'\n'
' def func(): pass\n'
' func = f1(arg)(f2(func))\n'
'\n'
'except that the original function is not temporarily bound to '
'the name\n'
'"func".\n'
'\n'
'When one or more *parameters* have the form *parameter* "="\n'
'*expression*, the function is said to have “default parameter '
'values.”\n'
'For a parameter with a default value, the corresponding '
'*argument* may\n'
'be omitted from a call, in which case the parameter’s default '
'value is\n'
'substituted. If a parameter has a default value, all following\n'
'parameters up until the “"*"” must also have a default value — '
'this is\n'
'a syntactic restriction that is not expressed by the grammar.\n'
'\n'
'**Default parameter values are evaluated from left to right when '
'the\n'
'function definition is executed.** This means that the '
'expression is\n'
'evaluated once, when the function is defined, and that the same '
'“pre-\n'
'computed” value is used for each call. This is especially '
'important\n'
'to understand when a default parameter is a mutable object, such '
'as a\n'
'list or a dictionary: if the function modifies the object (e.g. '
'by\n'
'appending an item to a list), the default value is in effect '
'modified.\n'
'This is generally not what was intended. A way around this is '
'to use\n'
'"None" as the default, and explicitly test for it in the body of '
'the\n'
'function, e.g.:\n'
'\n'
' def whats_on_the_telly(penguin=None):\n'
' if penguin is None:\n'
' penguin = []\n'
' penguin.append("property of the zoo")\n'
' return penguin\n'
'\n'
'Function call semantics are described in more detail in section '
'Calls.\n'
'A function call always assigns values to all parameters '
'mentioned in\n'
'the parameter list, either from position arguments, from '
'keyword\n'
'arguments, or from default values. If the form “"*identifier"” '
'is\n'
'present, it is initialized to a tuple receiving any excess '
'positional\n'
'parameters, defaulting to the empty tuple. If the form\n'
'“"**identifier"” is present, it is initialized to a new ordered\n'
'mapping receiving any excess keyword arguments, defaulting to a '
'new\n'
'empty mapping of the same type. Parameters after “"*"” or\n'
'“"*identifier"” are keyword-only parameters and may only be '
'passed\n'
'by keyword arguments.\n'
'\n'
'Parameters may have annotations of the form “": expression"” '
'following\n'
'the parameter name. Any parameter may have an annotation, even '
'those\n'
'of the form "*identifier" or "**identifier". Functions may '
'have\n'
'a “return” annotation of the form “"-> expression"” after the '
'parameter\n'
'list. These annotations can be any valid Python expression and '
'are\n'
'evaluated when the function definition is executed. Annotations '
'may\n'
'be evaluated in a different order than they appear in the source '
'code.\n'
'The presence of annotations does not change the semantics of a\n'
'function. The annotation values are available as values of a\n'
'dictionary keyed by the parameters’ names in the '
'"__annotations__"\n'
'attribute of the function object.\n'
'\n'
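'For instance (an editorial sketch; the function name is\n'
'illustrative):\n'
'\n'
'   >>> def f(x: int, y: "hint") -> bool:\n'
'   ...     return x > 0\n'
'   ...\n'
'   >>> f.__annotations__\n'
"   {'x': <class 'int'>, 'y': 'hint', 'return': <class 'bool'>}\n"
'\n'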
'It is also possible to create anonymous functions (functions not '
'bound\n'
'to a name), for immediate use in expressions. This uses lambda\n'
'expressions, described in section Lambdas. Note that the '
'lambda\n'
'expression is merely a shorthand for a simplified function '
'definition;\n'
'a function defined in a “"def"” statement can be passed around '
'or\n'
'assigned to another name just like a function defined by a '
'lambda\n'
'expression. The “"def"” form is actually more powerful since '
'it\n'
'allows the execution of multiple statements and annotations.\n'
'\n'
'**Programmer’s note:** Functions are first-class objects. A '
'“"def"”\n'
'statement executed inside a function definition defines a local\n'
'function that can be returned or passed around. Free variables '
'used\n'
'in the nested function can access the local variables of the '
'function\n'
'containing the def. See section Naming and binding for '
'details.\n'
'\n'
'See also:\n'
'\n'
' **PEP 3107** - Function Annotations\n'
' The original specification for function annotations.\n'
'\n'
'\n'
'Class definitions\n'
'=================\n'
'\n'
'A class definition defines a class object (see section The '
'standard\n'
'type hierarchy):\n'
'\n'
' classdef ::= [decorators] "class" classname [inheritance] '
'":" suite\n'
' inheritance ::= "(" [argument_list] ")"\n'
' classname ::= identifier\n'
'\n'
'A class definition is an executable statement. The inheritance '
'list\n'
'usually gives a list of base classes (see Metaclasses for more\n'
'advanced uses), so each item in the list should evaluate to a '
'class\n'
'object which allows subclassing. Classes without an inheritance '
'list\n'
'inherit, by default, from the base class "object"; hence,\n'
'\n'
' class Foo:\n'
' pass\n'
'\n'
'is equivalent to\n'
'\n'
' class Foo(object):\n'
' pass\n'
'\n'
'The class’s suite is then executed in a new execution frame '
'(see\n'
'Naming and binding), using a newly created local namespace and '
'the\n'
'original global namespace. (Usually, the suite contains mostly\n'
'function definitions.) When the class’s suite finishes '
'execution, its\n'
'execution frame is discarded but its local namespace is saved. '
'[4] A\n'
'class object is then created using the inheritance list for the '
'base\n'
'classes and the saved local namespace for the attribute '
'dictionary.\n'
'The class name is bound to this class object in the original '
'local\n'
'namespace.\n'
'\n'
'The order in which attributes are defined in the class body is\n'
'preserved in the new class’s "__dict__". Note that this is '
'reliable\n'
'only right after the class is created and only for classes that '
'were\n'
'defined using the definition syntax.\n'
'\n'
'Class creation can be customized heavily using metaclasses.\n'
'\n'
'Classes can also be decorated: just like when decorating '
'functions,\n'
'\n'
' @f1(arg)\n'
' @f2\n'
' class Foo: pass\n'
'\n'
'is roughly equivalent to\n'
'\n'
' class Foo: pass\n'
' Foo = f1(arg)(f2(Foo))\n'
'\n'
'The evaluation rules for the decorator expressions are the same '
'as for\n'
'function decorators. The result is then bound to the class '
'name.\n'
'\n'
'**Programmer’s note:** Variables defined in the class definition '
'are\n'
'class attributes; they are shared by instances. Instance '
'attributes\n'
'can be set in a method with "self.name = value". Both class '
'and\n'
'instance attributes are accessible through the notation '
'“"self.name"”,\n'
'and an instance attribute hides a class attribute with the same '
'name\n'
'when accessed in this way. Class attributes can be used as '
'defaults\n'
'for instance attributes, but using mutable values there can lead '
'to\n'
'unexpected results. Descriptors can be used to create instance\n'
'variables with different implementation details.\n'
'\n'
'See also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n'
' Class Decorators\n'
'\n'
'\n'
'Coroutines\n'
'==========\n'
'\n'
'New in version 3.5.\n'
'\n'
'\n'
'Coroutine function definition\n'
'-----------------------------\n'
'\n'
' async_funcdef ::= [decorators] "async" "def" funcname "(" '
'[parameter_list] ")" ["->" expression] ":" suite\n'
'\n'
'Execution of Python coroutines can be suspended and resumed at '
'many\n'
'points (see *coroutine*). In the body of a coroutine, any '
'"await" and\n'
'"async" identifiers become reserved keywords; "await" '
'expressions,\n'
'"async for" and "async with" can only be used in coroutine '
'bodies.\n'
'\n'
'Functions defined with "async def" syntax are always coroutine\n'
'functions, even if they do not contain "await" or "async" '
'keywords.\n'
'\n'
'It is a "SyntaxError" to use "yield from" expressions in "async '
'def"\n'
'coroutines.\n'
'\n'
'An example of a coroutine function:\n'
'\n'
' async def func(param1, param2):\n'
' do_stuff()\n'
' await some_coroutine()\n'
'\n'
'\n'
'The "async for" statement\n'
'-------------------------\n'
'\n'
' async_for_stmt ::= "async" for_stmt\n'
'\n'
'An *asynchronous iterable* is able to call asynchronous code in '
'its\n'
'*iter* implementation, and an *asynchronous iterator* can call\n'
'asynchronous code in its *next* method.\n'
'\n'
'The "async for" statement allows convenient iteration over\n'
'asynchronous iterators.\n'
'\n'
'The following code:\n'
'\n'
' async for TARGET in ITER:\n'
' BLOCK\n'
' else:\n'
' BLOCK2\n'
'\n'
'Is semantically equivalent to:\n'
'\n'
' iter = (ITER)\n'
' iter = type(iter).__aiter__(iter)\n'
' running = True\n'
' while running:\n'
' try:\n'
' TARGET = await type(iter).__anext__(iter)\n'
' except StopAsyncIteration:\n'
' running = False\n'
' else:\n'
' BLOCK\n'
' else:\n'
' BLOCK2\n'
'\n'
'See also "__aiter__()" and "__anext__()" for details.\n'
'\n'
'It is a "SyntaxError" to use "async for" statement outside of '
'an\n'
'"async def" function.\n'
'\n'
'\n'
'The "async with" statement\n'
'--------------------------\n'
'\n'
' async_with_stmt ::= "async" with_stmt\n'
'\n'
'An *asynchronous context manager* is a *context manager* that is '
'able\n'
'to suspend execution in its *enter* and *exit* methods.\n'
'\n'
'The following code:\n'
'\n'
' async with EXPR as VAR:\n'
' BLOCK\n'
'\n'
'Is semantically equivalent to:\n'
'\n'
' mgr = (EXPR)\n'
' aexit = type(mgr).__aexit__\n'
' aenter = type(mgr).__aenter__(mgr)\n'
'\n'
' VAR = await aenter\n'
' try:\n'
' BLOCK\n'
' except:\n'
' if not await aexit(mgr, *sys.exc_info()):\n'
' raise\n'
' else:\n'
' await aexit(mgr, None, None, None)\n'
'\n'
'See also "__aenter__()" and "__aexit__()" for details.\n'
'\n'
'It is a "SyntaxError" to use "async with" statement outside of '
'an\n'
'"async def" function.\n'
'\n'
'See also: **PEP 492** - Coroutines with async and await syntax\n'
'\n'
'-[ Footnotes ]-\n'
'\n'
'[1] The exception is propagated to the invocation stack unless\n'
' there is a "finally" clause which happens to raise another\n'
' exception. That new exception causes the old one to be '
'lost.\n'
'\n'
'[2] Currently, control “flows off the end” except in the case '
'of\n'
' an exception or the execution of a "return", "continue", or\n'
' "break" statement.\n'
'\n'
'[3] A string literal appearing as the first statement in the\n'
' function body is transformed into the function’s "__doc__"\n'
' attribute and therefore the function’s *docstring*.\n'
'\n'
'[4] A string literal appearing as the first statement in the '
'class\n'
' body is transformed into the namespace’s "__doc__" item and\n'
' therefore the class’s *docstring*.\n',
'context-managers': 'With Statement Context Managers\n'
'*******************************\n'
'\n'
'A *context manager* is an object that defines the '
'runtime context to\n'
'be established when executing a "with" statement. The '
'context manager\n'
'handles the entry into, and the exit from, the desired '
'runtime context\n'
'for the execution of the block of code. Context '
'managers are normally\n'
'invoked using the "with" statement (described in section '
'The with\n'
'statement), but can also be used by directly invoking '
'their methods.\n'
'\n'
'Typical uses of context managers include saving and '
'restoring various\n'
'kinds of global state, locking and unlocking resources, '
'closing opened\n'
'files, etc.\n'
'\n'
'For more information on context managers, see Context '
'Manager Types.\n'
'\n'
'object.__enter__(self)\n'
'\n'
' Enter the runtime context related to this object. The '
'"with"\n'
' statement will bind this method’s return value to the '
'target(s)\n'
' specified in the "as" clause of the statement, if '
'any.\n'
'\n'
'object.__exit__(self, exc_type, exc_value, traceback)\n'
'\n'
' Exit the runtime context related to this object. The '
'parameters\n'
' describe the exception that caused the context to be '
'exited. If the\n'
' context was exited without an exception, all three '
'arguments will\n'
' be "None".\n'
'\n'
' If an exception is supplied, and the method wishes to '
'suppress the\n'
' exception (i.e., prevent it from being propagated), '
'it should\n'
' return a true value. Otherwise, the exception will be '
'processed\n'
' normally upon exit from this method.\n'
'\n'
' Note that "__exit__()" methods should not reraise the '
'passed-in\n'
' exception; this is the caller’s responsibility.\n'
'\n'
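'A minimal sketch of a class implementing this protocol (an\n'
'editorial addition; the class name is illustrative):\n'
'\n'
'   >>> class tag:\n'
'   ...     def __enter__(self):\n'
'   ...         print("enter")\n'
'   ...         return self\n'
'   ...     def __exit__(self, exc_type, exc_value, traceback):\n'
'   ...         print("exit")\n'
'   ...         return False    # do not suppress exceptions\n'
'   ...\n'
'   >>> with tag():\n'
'   ...     print("body")\n'
'   ...\n'
'   enter\n'
'   body\n'
'   exit\n'
'\n'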
'See also:\n'
'\n'
' **PEP 343** - The “with” statement\n'
' The specification, background, and examples for the '
'Python "with"\n'
' statement.\n',
'continue': 'The "continue" statement\n'
'************************\n'
'\n'
' continue_stmt ::= "continue"\n'
'\n'
'"continue" may only occur syntactically nested in a "for" or '
'"while"\n'
'loop, but not nested in a function or class definition or '
'"finally"\n'
'clause within that loop. It continues with the next cycle of '
'the\n'
'nearest enclosing loop.\n'
'\n'
'When "continue" passes control out of a "try" statement with a\n'
'"finally" clause, that "finally" clause is executed before '
'really\n'
'starting the next loop cycle.\n',
'conversions': 'Arithmetic conversions\n'
'**********************\n'
'\n'
'When a description of an arithmetic operator below uses the '
'phrase\n'
'“the numeric arguments are converted to a common type,” this '
'means\n'
'that the operator implementation for built-in types works as '
'follows:\n'
'\n'
'* If either argument is a complex number, the other is '
'converted to\n'
' complex;\n'
'\n'
'* otherwise, if either argument is a floating point number, '
'the\n'
' other is converted to floating point;\n'
'\n'
'* otherwise, both must be integers and no conversion is '
'necessary.\n'
'\n'
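'These rules can be observed directly (an editorial doctest\n'
'sketch):\n'
'\n'
'   >>> 1 + 2.0    # int converted to float\n'
'   3.0\n'
'   >>> 1 + 2j     # int converted to complex\n'
'   (1+2j)\n'
'\n'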
'Some additional rules apply for certain operators (e.g., a '
'string as a\n'
'left argument to the ‘%’ operator). Extensions must define '
'their own\n'
'conversion behavior.\n',
'customization': 'Basic customization\n'
'*******************\n'
'\n'
'object.__new__(cls[, ...])\n'
'\n'
' Called to create a new instance of class *cls*. '
'"__new__()" is a\n'
' static method (special-cased so you need not declare it '
'as such)\n'
' that takes the class of which an instance was requested '
'as its\n'
' first argument. The remaining arguments are those '
'passed to the\n'
' object constructor expression (the call to the class). '
'The return\n'
' value of "__new__()" should be the new object instance '
'(usually an\n'
' instance of *cls*).\n'
'\n'
' Typical implementations create a new instance of the '
'class by\n'
' invoking the superclass’s "__new__()" method using\n'
' "super().__new__(cls[, ...])" with appropriate arguments '
'and then\n'
' modifying the newly-created instance as necessary before '
'returning\n'
' it.\n'
'\n'
' If "__new__()" returns an instance of *cls*, then the '
'new\n'
' instance’s "__init__()" method will be invoked like\n'
' "__init__(self[, ...])", where *self* is the new '
'instance and the\n'
' remaining arguments are the same as were passed to '
'"__new__()".\n'
'\n'
' If "__new__()" does not return an instance of *cls*, '
'then the new\n'
' instance’s "__init__()" method will not be invoked.\n'
'\n'
' "__new__()" is intended mainly to allow subclasses of '
'immutable\n'
' types (like int, str, or tuple) to customize instance '
'creation. It\n'
' is also commonly overridden in custom metaclasses in '
'order to\n'
' customize class creation.\n'
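'\n'
'   For instance, an illustrative subclass of an immutable type:\n'
'\n'
'      class UpperStr(str):\n'
'          def __new__(cls, value):\n'
'              # customize creation before the instance exists\n'
'              return super().__new__(cls, value.upper())\n'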
'\n'
'object.__init__(self[, ...])\n'
'\n'
' Called after the instance has been created (by '
'"__new__()"), but\n'
' before it is returned to the caller. The arguments are '
'those\n'
' passed to the class constructor expression. If a base '
'class has an\n'
' "__init__()" method, the derived class’s "__init__()" '
'method, if\n'
' any, must explicitly call it to ensure proper '
'initialization of the\n'
' base class part of the instance; for example:\n'
' "super().__init__([args...])".\n'
'\n'
' Because "__new__()" and "__init__()" work together in '
'constructing\n'
' objects ("__new__()" to create it, and "__init__()" to '
'customize\n'
' it), no non-"None" value may be returned by '
'"__init__()"; doing so\n'
' will cause a "TypeError" to be raised at runtime.\n'
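'\n'
'   For example (an illustrative pairing):\n'
'\n'
'      class Point:\n'
'          def __init__(self, x, y):\n'
'              super().__init__()\n'
'              self.x = x\n'
'              self.y = y\n'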
'\n'
'object.__del__(self)\n'
'\n'
' Called when the instance is about to be destroyed. This '
'is also\n'
' called a finalizer or (improperly) a destructor. If a '
'base class\n'
' has a "__del__()" method, the derived class’s '
'"__del__()" method,\n'
' if any, must explicitly call it to ensure proper '
'deletion of the\n'
' base class part of the instance.\n'
'\n'
' It is possible (though not recommended!) for the '
'"__del__()" method\n'
' to postpone destruction of the instance by creating a '
'new reference\n'
' to it. This is called object *resurrection*. It is\n'
' implementation-dependent whether "__del__()" is called a '
'second\n'
' time when a resurrected object is about to be destroyed; '
'the\n'
' current *CPython* implementation only calls it once.\n'
'\n'
' It is not guaranteed that "__del__()" methods are called '
'for\n'
' objects that still exist when the interpreter exits.\n'
'\n'
' Note: "del x" doesn’t directly call "x.__del__()" — the '
'former\n'
' decrements the reference count for "x" by one, and the '
'latter is\n'
' only called when "x"’s reference count reaches zero.\n'
'\n'
' **CPython implementation detail:** It is possible for a '
'reference\n'
' cycle to prevent the reference count of an object from '
'going to\n'
' zero. In this case, the cycle will be later detected '
'and deleted\n'
' by the *cyclic garbage collector*. A common cause of '
'reference\n'
' cycles is when an exception has been caught in a local '
'variable.\n'
' The frame’s locals then reference the exception, which '
'references\n'
' its own traceback, which references the locals of all '
'frames caught\n'
' in the traceback.\n'
'\n'
' See also: Documentation for the "gc" module.\n'
'\n'
' Warning: Due to the precarious circumstances under '
'which\n'
' "__del__()" methods are invoked, exceptions that occur '
'during\n'
' their execution are ignored, and a warning is printed '
'to\n'
' "sys.stderr" instead. In particular:\n'
'\n'
' * "__del__()" can be invoked when arbitrary code is '
'being\n'
' executed, including from any arbitrary thread. If '
'"__del__()"\n'
' needs to take a lock or invoke any other blocking '
'resource, it\n'
' may deadlock as the resource may already be taken by '
'the code\n'
' that gets interrupted to execute "__del__()".\n'
'\n'
' * "__del__()" can be executed during interpreter '
'shutdown. As\n'
' a consequence, the global variables it needs to '
'access\n'
' (including other modules) may already have been '
'deleted or set\n'
' to "None". Python guarantees that globals whose name '
'begins\n'
' with a single underscore are deleted from their '
'module before\n'
' other globals are deleted; if no other references to '
'such\n'
' globals exist, this may help in assuring that '
'imported modules\n'
' are still available at the time when the "__del__()" '
'method is\n'
' called.\n'
'\n'
'object.__repr__(self)\n'
'\n'
' Called by the "repr()" built-in function to compute the '
'“official”\n'
' string representation of an object. If at all possible, '
'this\n'
' should look like a valid Python expression that could be '
'used to\n'
' recreate an object with the same value (given an '
'appropriate\n'
' environment). If this is not possible, a string of the '
'form\n'
' "<...some useful description...>" should be returned. '
'The return\n'
' value must be a string object. If a class defines '
'"__repr__()" but\n'
' not "__str__()", then "__repr__()" is also used when an '
'“informal”\n'
' string representation of instances of that class is '
'required.\n'
'\n'
' This is typically used for debugging, so it is important '
'that the\n'
' representation is information-rich and unambiguous.\n'
'\n'
'object.__str__(self)\n'
'\n'
' Called by "str(object)" and the built-in functions '
'"format()" and\n'
' "print()" to compute the “informal” or nicely printable '
'string\n'
' representation of an object. The return value must be a '
'string\n'
' object.\n'
'\n'
' This method differs from "object.__repr__()" in that '
'there is no\n'
' expectation that "__str__()" return a valid Python '
'expression: a\n'
' more convenient or concise representation can be used.\n'
'\n'
' The default implementation defined by the built-in type '
'"object"\n'
' calls "object.__repr__()".\n'
'\n'
'object.__bytes__(self)\n'
'\n'
' Called by bytes to compute a byte-string representation '
'of an\n'
' object. This should return a "bytes" object.\n'
'\n'
'object.__format__(self, format_spec)\n'
'\n'
' Called by the "format()" built-in function, and by '
'extension,\n'
' evaluation of formatted string literals and the '
'"str.format()"\n'
' method, to produce a “formatted” string representation '
'of an\n'
' object. The "format_spec" argument is a string that '
'contains a\n'
' description of the formatting options desired. The '
'interpretation\n'
' of the "format_spec" argument is up to the type '
'implementing\n'
' "__format__()", however most classes will either '
'delegate\n'
' formatting to one of the built-in types, or use a '
'similar\n'
' formatting option syntax.\n'
'\n'
' See Format Specification Mini-Language for a description '
'of the\n'
' standard formatting syntax.\n'
'\n'
' The return value must be a string object.\n'
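'\n'
'   For example, a class might delegate to a built-in type\n'
'   (illustrative sketch):\n'
'\n'
'      class Celsius:\n'
'          def __init__(self, degrees):\n'
'              self.degrees = degrees\n'
'\n'
'          def __format__(self, format_spec):\n'
'              # reuse float formatting, then append a unit\n'
'              return format(self.degrees, format_spec) + "C"\n'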
'\n'
' Changed in version 3.4: The __format__ method of '
'"object" itself\n'
' raises a "TypeError" if passed any non-empty string.\n'
'\n'
'object.__lt__(self, other)\n'
'object.__le__(self, other)\n'
'object.__eq__(self, other)\n'
'object.__ne__(self, other)\n'
'object.__gt__(self, other)\n'
'object.__ge__(self, other)\n'
'\n'
' These are the so-called “rich comparison” methods. The\n'
' correspondence between operator symbols and method names '
'is as\n'
' follows: "x<y" calls "x.__lt__(y)", "x<=y" calls '
'"x.__le__(y)",\n'
' "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", '
'"x>y" calls\n'
' "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n'
'\n'
' A rich comparison method may return the singleton '
'"NotImplemented"\n'
' if it does not implement the operation for a given pair '
'of\n'
' arguments. By convention, "False" and "True" are '
'returned for a\n'
' successful comparison. However, these methods can return '
'any value,\n'
' so if the comparison operator is used in a Boolean '
'context (e.g.,\n'
' in the condition of an "if" statement), Python will call '
'"bool()"\n'
' on the value to determine if the result is true or '
'false.\n'
'\n'
' By default, "__ne__()" delegates to "__eq__()" and '
'inverts the\n'
' result unless it is "NotImplemented". There are no '
'other implied\n'
' relationships among the comparison operators, for '
'example, the\n'
' truth of "(x<y or x==y)" does not imply "x<=y". To '
'automatically\n'
' generate ordering operations from a single root '
'operation, see\n'
' "functools.total_ordering()".\n'
'\n'
' See the paragraph on "__hash__()" for some important '
'notes on\n'
' creating *hashable* objects which support custom '
'comparison\n'
' operations and are usable as dictionary keys.\n'
'\n'
' There are no swapped-argument versions of these methods '
'(to be used\n'
' when the left argument does not support the operation '
'but the right\n'
' argument does); rather, "__lt__()" and "__gt__()" are '
'each other’s\n'
' reflection, "__le__()" and "__ge__()" are each other’s '
'reflection,\n'
' and "__eq__()" and "__ne__()" are their own reflection. '
'If the\n'
' operands are of different types, and the right operand’s '
'type is a\n'
' direct or indirect subclass of the left operand’s type, '
'the\n'
' reflected method of the right operand has priority, '
'otherwise the\n'
' left operand’s method has priority. Virtual subclassing '
'is not\n'
' considered.\n'
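'\n'
'   For example, "functools.total_ordering()" can fill in the\n'
'   remaining operators from an illustrative pair:\n'
'\n'
'      from functools import total_ordering\n'
'\n'
'      @total_ordering\n'
'      class Version:\n'
'          def __init__(self, parts):\n'
'              self.parts = tuple(parts)\n'
'\n'
'          def __eq__(self, other):\n'
'              return self.parts == other.parts\n'
'\n'
'          def __lt__(self, other):\n'
'              return self.parts < other.parts\n'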
'\n'
'object.__hash__(self)\n'
'\n'
' Called by built-in function "hash()" and for operations '
'on members\n'
' of hashed collections including "set", "frozenset", and '
'"dict".\n'
' "__hash__()" should return an integer. The only required '
'property\n'
' is that objects which compare equal have the same hash '
'value; it is\n'
' advised to mix together the hash values of the '
'components of the\n'
' object that also play a part in comparison of objects by '
'packing\n'
' them into a tuple and hashing the tuple. Example:\n'
'\n'
' def __hash__(self):\n'
' return hash((self.name, self.nick, self.color))\n'
'\n'
' Note: "hash()" truncates the value returned from an '
'object’s\n'
' custom "__hash__()" method to the size of a '
'"Py_ssize_t". This\n'
' is typically 8 bytes on 64-bit builds and 4 bytes on '
'32-bit\n'
' builds. If an object’s "__hash__()" must '
'interoperate on builds\n'
' of different bit sizes, be sure to check the width on '
'all\n'
' supported builds. An easy way to do this is with '
'"python -c\n'
' "import sys; print(sys.hash_info.width)"".\n'
'\n'
' If a class does not define an "__eq__()" method it '
'should not\n'
' define a "__hash__()" operation either; if it defines '
'"__eq__()"\n'
' but not "__hash__()", its instances will not be usable '
'as items in\n'
' hashable collections. If a class defines mutable '
'objects and\n'
' implements an "__eq__()" method, it should not '
'implement\n'
' "__hash__()", since the implementation of hashable '
'collections\n'
' requires that a key’s hash value is immutable (if the '
'object’s hash\n'
' value changes, it will be in the wrong hash bucket).\n'
'\n'
' User-defined classes have "__eq__()" and "__hash__()" '
'methods by\n'
' default; with them, all objects compare unequal (except '
'with\n'
' themselves) and "x.__hash__()" returns an appropriate '
'value such\n'
' that "x == y" implies both that "x is y" and "hash(x) == '
'hash(y)".\n'
'\n'
' A class that overrides "__eq__()" and does not define '
'"__hash__()"\n'
' will have its "__hash__()" implicitly set to "None". '
'When the\n'
' "__hash__()" method of a class is "None", instances of '
'the class\n'
' will raise an appropriate "TypeError" when a program '
'attempts to\n'
' retrieve their hash value, and will also be correctly '
'identified as\n'
' unhashable when checking "isinstance(obj, '
'collections.Hashable)".\n'
'\n'
' If a class that overrides "__eq__()" needs to retain '
'the\n'
' implementation of "__hash__()" from a parent class, the '
'interpreter\n'
' must be told this explicitly by setting "__hash__ =\n'
' <ParentClass>.__hash__".\n'
'\n'
' If a class that does not override "__eq__()" wishes to '
'suppress\n'
' hash support, it should include "__hash__ = None" in the '
'class\n'
' definition. A class which defines its own "__hash__()" '
'that\n'
' explicitly raises a "TypeError" would be incorrectly '
'identified as\n'
' hashable by an "isinstance(obj, collections.Hashable)" '
'call.\n'
'\n'
' Note: By default, the "__hash__()" values of str, bytes '
'and\n'
' datetime objects are “salted” with an unpredictable '
'random value.\n'
' Although they remain constant within an individual '
'Python\n'
' process, they are not predictable between repeated '
'invocations of\n'
' Python. This is intended to provide protection against '
'a denial-\n'
' of-service caused by carefully-chosen inputs that '
'exploit the\n'
' worst case performance of a dict insertion, O(n^2) '
'complexity.\n'
' See '
'http://www.ocert.org/advisories/ocert-2011-003.html for\n'
' details. Changing hash values affects the iteration '
'order of\n'
' dicts, sets and other mappings. Python has never made '
'guarantees\n'
' about this ordering (and it typically varies between '
'32-bit and\n'
' 64-bit builds). See also "PYTHONHASHSEED".\n'
'\n'
' Changed in version 3.3: Hash randomization is enabled by '
'default.\n'
'\n'
'object.__bool__(self)\n'
'\n'
' Called to implement truth value testing and the built-in '
'operation\n'
' "bool()"; should return "False" or "True". When this '
'method is not\n'
' defined, "__len__()" is called, if it is defined, and '
'the object is\n'
' considered true if its result is nonzero. If a class '
'defines\n'
' neither "__len__()" nor "__bool__()", all its instances '
'are\n'
' considered true.\n',
'debugger': '"pdb" — The Python Debugger\n'
'***************************\n'
'\n'
'**Source code:** Lib/pdb.py\n'
'\n'
'======================================================================\n'
'\n'
'The module "pdb" defines an interactive source code debugger '
'for\n'
'Python programs. It supports setting (conditional) breakpoints '
'and\n'
'single stepping at the source line level, inspection of stack '
'frames,\n'
'source code listing, and evaluation of arbitrary Python code in '
'the\n'
'context of any stack frame. It also supports post-mortem '
'debugging\n'
'and can be called under program control.\n'
'\n'
'The debugger is extensible – it is actually defined as the '
'class\n'
'"Pdb". This is currently undocumented but easily understood by '
'reading\n'
'the source. The extension interface uses the modules "bdb" and '
'"cmd".\n'
'\n'
'The debugger’s prompt is "(Pdb)". Typical usage to run a program '
'under\n'
'control of the debugger is:\n'
'\n'
' >>> import pdb\n'
' >>> import mymodule\n'
" >>> pdb.run('mymodule.test()')\n"
' > <string>(0)?()\n'
' (Pdb) continue\n'
' > <string>(1)?()\n'
' (Pdb) continue\n'
" NameError: 'spam'\n"
' > <string>(1)?()\n'
' (Pdb)\n'
'\n'
'Changed in version 3.3: Tab-completion via the "readline" module '
'is\n'
'available for commands and command arguments, e.g. the current '
'global\n'
'and local names are offered as arguments of the "p" command.\n'
'\n'
'"pdb.py" can also be invoked as a script to debug other '
'scripts. For\n'
'example:\n'
'\n'
' python3 -m pdb myscript.py\n'
'\n'
'When invoked as a script, pdb will automatically enter '
'post-mortem\n'
'debugging if the program being debugged exits abnormally. After '
'post-\n'
'mortem debugging (or after normal exit of the program), pdb '
'will\n'
'restart the program. Automatic restarting preserves pdb’s state '
'(such\n'
'as breakpoints) and in most cases is more useful than quitting '
'the\n'
'debugger upon program’s exit.\n'
'\n'
'New in version 3.2: "pdb.py" now accepts a "-c" option that '
'executes\n'
'commands as if given in a ".pdbrc" file, see Debugger Commands.\n'
'\n'
'The typical usage to break into the debugger from a running '
'program is\n'
'to insert\n'
'\n'
' import pdb; pdb.set_trace()\n'
'\n'
'at the location you want to break into the debugger. You can '
'then\n'
'step through the code following this statement, and continue '
'running\n'
'without the debugger using the "continue" command.\n'
'\n'
'The typical usage to inspect a crashed program is:\n'
'\n'
' >>> import pdb\n'
' >>> import mymodule\n'
' >>> mymodule.test()\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 1, in <module>\n'
' File "./mymodule.py", line 4, in test\n'
' test2()\n'
' File "./mymodule.py", line 3, in test2\n'
' print(spam)\n'
' NameError: spam\n'
' >>> pdb.pm()\n'
' > ./mymodule.py(3)test2()\n'
' -> print(spam)\n'
' (Pdb)\n'
'\n'
'The module defines the following functions; each enters the '
'debugger\n'
'in a slightly different way:\n'
'\n'
'pdb.run(statement, globals=None, locals=None)\n'
'\n'
' Execute the *statement* (given as a string or a code object) '
'under\n'
' debugger control. The debugger prompt appears before any '
'code is\n'
' executed; you can set breakpoints and type "continue", or you '
'can\n'
' step through the statement using "step" or "next" (all these\n'
' commands are explained below). The optional *globals* and '
'*locals*\n'
' arguments specify the environment in which the code is '
'executed; by\n'
' default the dictionary of the module "__main__" is used. '
'(See the\n'
' explanation of the built-in "exec()" or "eval()" functions.)\n'
'\n'
'pdb.runeval(expression, globals=None, locals=None)\n'
'\n'
' Evaluate the *expression* (given as a string or a code '
'object)\n'
' under debugger control. When "runeval()" returns, it returns '
'the\n'
' value of the expression. Otherwise this function is similar '
'to\n'
' "run()".\n'
'\n'
'pdb.runcall(function, *args, **kwds)\n'
'\n'
' Call the *function* (a function or method object, not a '
'string)\n'
' with the given arguments. When "runcall()" returns, it '
'returns\n'
' whatever the function call returned. The debugger prompt '
'appears\n'
' as soon as the function is entered.\n'
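'\n'
'   For example (illustrative):\n'
'\n'
'      import pdb\n'
'      pdb.runcall(mymodule.test)   # prompt appears inside test()\n'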
'\n'
'pdb.set_trace()\n'
'\n'
' Enter the debugger at the calling stack frame. This is '
'useful to\n'
' hard-code a breakpoint at a given point in a program, even if '
'the\n'
' code is not otherwise being debugged (e.g. when an assertion\n'
' fails).\n'
'\n'
'pdb.post_mortem(traceback=None)\n'
'\n'
' Enter post-mortem debugging of the given *traceback* object. '
'If no\n'
' *traceback* is given, it uses the one of the exception that '
'is\n'
' currently being handled (an exception must be being handled '
'if the\n'
' default is to be used).\n'
'\n'
'pdb.pm()\n'
'\n'
' Enter post-mortem debugging of the traceback found in\n'
' "sys.last_traceback".\n'
'\n'
'The "run*" functions and "set_trace()" are aliases for '
'instantiating\n'
'the "Pdb" class and calling the method of the same name. If you '
'want\n'
'to access further features, you have to do this yourself:\n'
'\n'
"class pdb.Pdb(completekey='tab', stdin=None, stdout=None, "
'skip=None, nosigint=False, readrc=True)\n'
'\n'
' "Pdb" is the debugger class.\n'
'\n'
' The *completekey*, *stdin* and *stdout* arguments are passed '
'to the\n'
' underlying "cmd.Cmd" class; see the description there.\n'
'\n'
' The *skip* argument, if given, must be an iterable of '
'glob-style\n'
' module name patterns. The debugger will not step into frames '
'that\n'
' originate in a module that matches one of these patterns. '
'[1]\n'
'\n'
' By default, Pdb sets a handler for the SIGINT signal (which '
'is sent\n'
' when the user presses "Ctrl-C" on the console) when you give '
'a\n'
' "continue" command. This allows you to break into the '
'debugger\n'
' again by pressing "Ctrl-C". If you want Pdb not to touch '
'the\n'
' SIGINT handler, set *nosigint* to true.\n'
'\n'
' The *readrc* argument defaults to true and controls whether '
'Pdb\n'
' will load .pdbrc files from the filesystem.\n'
'\n'
' Example call to enable tracing with *skip*:\n'
'\n'
" import pdb; pdb.Pdb(skip=['django.*']).set_trace()\n"
'\n'
' New in version 3.1: The *skip* argument.\n'
'\n'
' New in version 3.2: The *nosigint* argument. Previously, a '
'SIGINT\n'
' handler was never set by Pdb.\n'
'\n'
' Changed in version 3.6: The *readrc* argument.\n'
'\n'
' run(statement, globals=None, locals=None)\n'
' runeval(expression, globals=None, locals=None)\n'
' runcall(function, *args, **kwds)\n'
' set_trace()\n'
'\n'
' See the documentation for the functions explained above.\n'
'\n'
'\n'
'Debugger Commands\n'
'=================\n'
'\n'
'The commands recognized by the debugger are listed below. Most\n'
'commands can be abbreviated to one or two letters as indicated; '
'e.g.\n'
'"h(elp)" means that either "h" or "help" can be used to enter '
'the help\n'
'command (but not "he" or "hel", nor "H" or "Help" or "HELP").\n'
'Arguments to commands must be separated by whitespace (spaces '
'or\n'
'tabs). Optional arguments are enclosed in square brackets '
'("[]") in\n'
'the command syntax; the square brackets must not be typed.\n'
'Alternatives in the command syntax are separated by a vertical '
'bar\n'
'("|").\n'
'\n'
'Entering a blank line repeats the last command entered. '
'Exception: if\n'
'the last command was a "list" command, the next 11 lines are '
'listed.\n'
'\n'
'Commands that the debugger doesn’t recognize are assumed to be '
'Python\n'
'statements and are executed in the context of the program being\n'
'debugged. Python statements can also be prefixed with an '
'exclamation\n'
'point ("!"). This is a powerful way to inspect the program '
'being\n'
'debugged; it is even possible to change a variable or call a '
'function.\n'
'When an exception occurs in such a statement, the exception name '
'is\n'
'printed but the debugger’s state is not changed.\n'
'\n'
'The debugger supports aliases. Aliases can have parameters '
'which\n'
'allow a certain level of adaptability to the context under\n'
'examination.\n'
'\n'
'Multiple commands may be entered on a single line, separated by '
'";;".\n'
'(A single ";" is not used as it is the separator for multiple '
'commands\n'
'in a line that is passed to the Python parser.) No intelligence '
'is\n'
'applied to separating the commands; the input is split at the '
'first\n'
'";;" pair, even if it is in the middle of a quoted string.\n'
'\n'
'If a file ".pdbrc" exists in the user’s home directory or in '
'the\n'
'current directory, it is read in and executed as if it had been '
'typed\n'
'at the debugger prompt. This is particularly useful for '
'aliases. If\n'
'both files exist, the one in the home directory is read first '
'and\n'
'aliases defined there can be overridden by the local file.\n'
'\n'
'Changed in version 3.2: ".pdbrc" can now contain commands that\n'
'continue debugging, such as "continue" or "next". Previously, '
'these\n'
'commands had no effect.\n'
'\n'
'h(elp) [command]\n'
'\n'
' Without argument, print the list of available commands. With '
'a\n'
' *command* as argument, print help about that command. "help '
'pdb"\n'
' displays the full documentation (the docstring of the "pdb"\n'
' module). Since the *command* argument must be an identifier, '
'"help\n'
' exec" must be entered to get help on the "!" command.\n'
'\n'
'w(here)\n'
'\n'
' Print a stack trace, with the most recent frame at the '
'bottom. An\n'
' arrow indicates the current frame, which determines the '
'context of\n'
' most commands.\n'
'\n'
'd(own) [count]\n'
'\n'
' Move the current frame *count* (default one) levels down in '
'the\n'
' stack trace (to a newer frame).\n'
'\n'
'u(p) [count]\n'
'\n'
' Move the current frame *count* (default one) levels up in the '
'stack\n'
' trace (to an older frame).\n'
'\n'
'b(reak) [([filename:]lineno | function) [, condition]]\n'
'\n'
' With a *lineno* argument, set a break there in the current '
'file.\n'
' With a *function* argument, set a break at the first '
'executable\n'
' statement within that function. The line number may be '
'prefixed\n'
' with a filename and a colon, to specify a breakpoint in '
'another\n'
' file (probably one that hasn’t been loaded yet). The file '
'is\n'
' searched on "sys.path". Note that each breakpoint is '
'assigned a\n'
' number to which all the other breakpoint commands refer.\n'
'\n'
' If a second argument is present, it is an expression which '
'must\n'
' evaluate to true before the breakpoint is honored.\n'
'\n'
' Without argument, list all breaks, including for each '
'breakpoint,\n'
' the number of times that breakpoint has been hit, the '
'current\n'
' ignore count, and the associated condition if any.\n'
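'\n'
'   For example (illustrative):\n'
'\n'
'      (Pdb) break mymodule.py:3, spam is None\n'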
'\n'
'tbreak [([filename:]lineno | function) [, condition]]\n'
'\n'
' Temporary breakpoint, which is removed automatically when it '
'is\n'
' first hit. The arguments are the same as for "break".\n'
'\n'
'cl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n'
'\n'
' With a *filename:lineno* argument, clear all the breakpoints '
'at\n'
' this line. With a space separated list of breakpoint numbers, '
'clear\n'
' those breakpoints. Without argument, clear all breaks (but '
'first\n'
' ask confirmation).\n'
'\n'
'disable [bpnumber [bpnumber ...]]\n'
'\n'
' Disable the breakpoints given as a space separated list of\n'
' breakpoint numbers. Disabling a breakpoint means it cannot '
'cause\n'
' the program to stop execution, but unlike clearing a '
'breakpoint, it\n'
' remains in the list of breakpoints and can be (re-)enabled.\n'
'\n'
'enable [bpnumber [bpnumber ...]]\n'
'\n'
' Enable the breakpoints specified.\n'
'\n'
'ignore bpnumber [count]\n'
'\n'
' Set the ignore count for the given breakpoint number. If '
'count is\n'
' omitted, the ignore count is set to 0. A breakpoint becomes '
'active\n'
' when the ignore count is zero. When non-zero, the count is\n'
' decremented each time the breakpoint is reached and the '
'breakpoint\n'
' is not disabled and any associated condition evaluates to '
'true.\n'
'\n'
'condition bpnumber [condition]\n'
'\n'
' Set a new *condition* for the breakpoint, an expression which '
'must\n'
' evaluate to true before the breakpoint is honored. If '
'*condition*\n'
' is absent, any existing condition is removed; i.e., the '
'breakpoint\n'
' is made unconditional.\n'
'\n'
'commands [bpnumber]\n'
'\n'
' Specify a list of commands for breakpoint number *bpnumber*. '
'The\n'
' commands themselves appear on the following lines. Type a '
'line\n'
' containing just "end" to terminate the commands. An example:\n'
'\n'
' (Pdb) commands 1\n'
' (com) p some_variable\n'
' (com) end\n'
' (Pdb)\n'
'\n'
' To remove all commands from a breakpoint, type commands and '
'follow\n'
' it immediately with "end"; that is, give no commands.\n'
'\n'
' With no *bpnumber* argument, commands refers to the last '
'breakpoint\n'
' set.\n'
'\n'
' You can use breakpoint commands to start your program up '
'again.\n'
' Simply use the continue command, or step, or any other '
'command that\n'
' resumes execution.\n'
'\n'
' Specifying any command resuming execution (currently '
'continue,\n'
' step, next, return, jump, quit and their abbreviations) '
'terminates\n'
' the command list (as if that command was immediately followed '
'by\n'
' end). This is because any time you resume execution (even '
'with a\n'
' simple next or step), you may encounter another '
'breakpoint—which\n'
' could have its own command list, leading to ambiguities about '
'which\n'
' list to execute.\n'
'\n'
' If you use the ‘silent’ command in the command list, the '
'usual\n'
' message about stopping at a breakpoint is not printed. This '
'may be\n'
' desirable for breakpoints that are to print a specific '
'message and\n'
' then continue. If none of the other commands print anything, '
'you\n'
' see no sign that the breakpoint was reached.\n'
'\n'
's(tep)\n'
'\n'
' Execute the current line, stop at the first possible '
'occasion\n'
' (either in a function that is called or on the next line in '
'the\n'
' current function).\n'
'\n'
'n(ext)\n'
'\n'
' Continue execution until the next line in the current '
'function is\n'
' reached or it returns. (The difference between "next" and '
'"step"\n'
' is that "step" stops inside a called function, while "next"\n'
' executes called functions at (nearly) full speed, only '
'stopping at\n'
' the next line in the current function.)\n'
'\n'
'unt(il) [lineno]\n'
'\n'
' Without argument, continue execution until the line with a '
'number\n'
' greater than the current one is reached.\n'
'\n'
' With a line number, continue execution until a line with a '
'number\n'
' greater or equal to that is reached. In both cases, also '
'stop when\n'
' the current frame returns.\n'
'\n'
' Changed in version 3.2: Allow giving an explicit line '
'number.\n'
'\n'
'r(eturn)\n'
'\n'
' Continue execution until the current function returns.\n'
'\n'
'c(ont(inue))\n'
'\n'
' Continue execution, only stop when a breakpoint is '
'encountered.\n'
'\n'
'j(ump) lineno\n'
'\n'
' Set the next line that will be executed. Only available in '
'the\n'
' bottom-most frame. This lets you jump back and execute code '
'again,\n'
' or jump forward to skip code that you don’t want to run.\n'
'\n'
' It should be noted that not all jumps are allowed – for '
'instance it\n'
' is not possible to jump into the middle of a "for" loop or '
'out of a\n'
' "finally" clause.\n'
'\n'
'l(ist) [first[, last]]\n'
'\n'
' List source code for the current file. Without arguments, '
'list 11\n'
' lines around the current line or continue the previous '
'listing.\n'
' With "." as argument, list 11 lines around the current line. '
'With\n'
' one argument, list 11 lines around at that line. With two\n'
' arguments, list the given range; if the second argument is '
'less\n'
' than the first, it is interpreted as a count.\n'
'\n'
' The current line in the current frame is indicated by "->". '
'If an\n'
' exception is being debugged, the line where the exception '
'was\n'
' originally raised or propagated is indicated by ">>", if it '
'differs\n'
' from the current line.\n'
'\n'
' New in version 3.2: The ">>" marker.\n'
'\n'
'll | longlist\n'
'\n'
' List all source code for the current function or frame.\n'
' Interesting lines are marked as for "list".\n'
'\n'
' New in version 3.2.\n'
'\n'
'a(rgs)\n'
'\n'
' Print the argument list of the current function.\n'
'\n'
'p expression\n'
'\n'
' Evaluate the *expression* in the current context and print '
'its\n'
' value.\n'
'\n'
' Note: "print()" can also be used, but is not a debugger '
'command —\n'
' this executes the Python "print()" function.\n'
'\n'
'pp expression\n'
'\n'
' Like the "p" command, except the value of the expression is '
'pretty-\n'
' printed using the "pprint" module.\n'
'\n'
'whatis expression\n'
'\n'
' Print the type of the *expression*.\n'
'\n'
'source expression\n'
'\n'
' Try to get source code for the given object and display it.\n'
'\n'
' New in version 3.2.\n'
'\n'
'display [expression]\n'
'\n'
' Display the value of the expression if it changed, each time\n'
' execution stops in the current frame.\n'
'\n'
' Without expression, list all display expressions for the '
'current\n'
' frame.\n'
'\n'
' New in version 3.2.\n'
'\n'
'undisplay [expression]\n'
'\n'
' Do not display the expression any more in the current frame.\n'
' Without expression, clear all display expressions for the '
'current\n'
' frame.\n'
'\n'
' New in version 3.2.\n'
'\n'
'interact\n'
'\n'
' Start an interactive interpreter (using the "code" module) '
'whose\n'
' global namespace contains all the (global and local) names '
'found in\n'
' the current scope.\n'
'\n'
' New in version 3.2.\n'
'\n'
'alias [name [command]]\n'
'\n'
' Create an alias called *name* that executes *command*. The '
'command\n'
' must *not* be enclosed in quotes. Replaceable parameters can '
'be\n'
' indicated by "%1", "%2", and so on, while "%*" is replaced by '
'all\n'
' the parameters. If no command is given, the current alias '
'for\n'
' *name* is shown. If no arguments are given, all aliases are '
'listed.\n'
'\n'
' Aliases may be nested and can contain anything that can be '
'legally\n'
' typed at the pdb prompt. Note that internal pdb commands '
'*can* be\n'
' overridden by aliases. Such a command is then hidden until '
'the\n'
' alias is removed. Aliasing is recursively applied to the '
'first\n'
' word of the command line; all other words in the line are '
'left\n'
' alone.\n'
'\n'
' As an example, here are two useful aliases (especially when '
'placed\n'
' in the ".pdbrc" file):\n'
'\n'
' # Print instance variables (usage "pi classInst")\n'
' alias pi for k in %1.__dict__.keys(): '
'print("%1.",k,"=",%1.__dict__[k])\n'
' # Print instance variables in self\n'
' alias ps pi self\n'
'\n'
'unalias name\n'
'\n'
' Delete the specified alias.\n'
'\n'
'! statement\n'
'\n'
' Execute the (one-line) *statement* in the context of the '
'current\n'
' stack frame. The exclamation point can be omitted unless the '
'first\n'
' word of the statement resembles a debugger command. To set '
'a\n'
' global variable, you can prefix the assignment command with '
'a\n'
' "global" statement on the same line, e.g.:\n'
'\n'
" (Pdb) global list_options; list_options = ['-l']\n"
' (Pdb)\n'
'\n'
'run [args ...]\n'
'restart [args ...]\n'
'\n'
' Restart the debugged Python program. If an argument is '
'supplied,\n'
' it is split with "shlex" and the result is used as the new\n'
' "sys.argv". History, breakpoints, actions and debugger '
'options are\n'
' preserved. "restart" is an alias for "run".\n'
'\n'
'q(uit)\n'
'\n'
' Quit from the debugger. The program being executed is '
'aborted.\n'
'\n'
'-[ Footnotes ]-\n'
'\n'
'[1] Whether a frame is considered to originate in a certain '
'module\n'
' is determined by the "__name__" in the frame globals.\n',
'del': 'The "del" statement\n'
'*******************\n'
'\n'
' del_stmt ::= "del" target_list\n'
'\n'
'Deletion is recursively defined very similar to the way assignment '
'is\n'
'defined. Rather than spelling it out in full details, here are some\n'
'hints.\n'
'\n'
'Deletion of a target list recursively deletes each target, from left\n'
'to right.\n'
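'\n'
'For example (illustrative):\n'
'\n'
'   x, y = 1, 2\n'
'   del x, y      # unbinds both names, left to right\n'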
'\n'
'Deletion of a name removes the binding of that name from the local '
'or\n'
'global namespace, depending on whether the name occurs in a "global"\n'
'statement in the same code block. If the name is unbound, a\n'
'"NameError" exception will be raised.\n'
'\n'
'Deletion of attribute references, subscriptions and slicings is '
'passed\n'
'to the primary object involved; deletion of a slicing is in general\n'
'equivalent to assignment of an empty slice of the right type (but '
'even\n'
'this is determined by the sliced object).\n'
'\n'
'Changed in version 3.2: Previously it was illegal to delete a name\n'
'from the local namespace if it occurs as a free variable in a nested\n'
'block.\n',
'dict': 'Dictionary displays\n'
'*******************\n'
'\n'
'A dictionary display is a possibly empty series of key/datum pairs\n'
'enclosed in curly braces:\n'
'\n'
' dict_display ::= "{" [key_datum_list | dict_comprehension] '
'"}"\n'
' key_datum_list ::= key_datum ("," key_datum)* [","]\n'
' key_datum ::= expression ":" expression | "**" or_expr\n'
' dict_comprehension ::= expression ":" expression comp_for\n'
'\n'
'A dictionary display yields a new dictionary object.\n'
'\n'
'If a comma-separated sequence of key/datum pairs is given, they are\n'
'evaluated from left to right to define the entries of the '
'dictionary:\n'
'each key object is used as a key into the dictionary to store the\n'
'corresponding datum. This means that you can specify the same key\n'
'multiple times in the key/datum list, and the final dictionary’s '
'value\n'
'for that key will be the last one given.\n'
'\n'
'A double asterisk "**" denotes *dictionary unpacking*. Its operand\n'
'must be a *mapping*. Each mapping item is added to the new\n'
'dictionary. Later values replace values already set by earlier\n'
'key/datum pairs and earlier dictionary unpackings.\n'
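'\n'
'For example (illustrative):\n'
'\n'
'   defaults = {"colour": "red", "size": 1}\n'
'   d = {"size": 0, **defaults, "name": "x"}\n'
'   # {"size": 1, "colour": "red", "name": "x"}\n'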
'\n'
'New in version 3.5: Unpacking into dictionary displays, originally\n'
'proposed by **PEP 448**.\n'
'\n'
'A dict comprehension, in contrast to list and set comprehensions,\n'
'needs two expressions separated with a colon followed by the usual\n'
'“for” and “if” clauses. When the comprehension is run, the '
'resulting\n'
'key and value elements are inserted in the new dictionary in the '
'order\n'
'they are produced.\n'
'\n'
'Restrictions on the types of the key values are listed earlier in\n'
'section The standard type hierarchy. (To summarize, the key type\n'
'should be *hashable*, which excludes all mutable objects.) Clashes\n'
'between duplicate keys are not detected; the last datum (textually\n'
'rightmost in the display) stored for a given key value prevails.\n',
'dynamic-features': 'Interaction with dynamic features\n'
'*********************************\n'
'\n'
'Name resolution of free variables occurs at runtime, not '
'at compile\n'
'time. This means that the following code will print 42:\n'
'\n'
' i = 10\n'
' def f():\n'
' print(i)\n'
' i = 42\n'
' f()\n'
'\n'
'The "eval()" and "exec()" functions do not have access '
'to the full\n'
'environment for resolving names. Names may be resolved '
'in the local\n'
'and global namespaces of the caller. Free variables are '
'not resolved\n'
'in the nearest enclosing namespace, but in the global '
'namespace. [1]\n'
'The "exec()" and "eval()" functions have optional '
'arguments to\n'
'override the global and local namespace. If only one '
'namespace is\n'
'specified, it is used for both.\n',
'else': 'The "if" statement\n'
'******************\n'
'\n'
'The "if" statement is used for conditional execution:\n'
'\n'
' if_stmt ::= "if" expression ":" suite\n'
' ( "elif" expression ":" suite )*\n'
' ["else" ":" suite]\n'
'\n'
'It selects exactly one of the suites by evaluating the expressions '
'one\n'
'by one until one is found to be true (see section Boolean '
'operations\n'
'for the definition of true and false); then that suite is executed\n'
'(and no other part of the "if" statement is executed or evaluated).\n'
'If all expressions are false, the suite of the "else" clause, if\n'
'present, is executed.\n',
'exceptions': 'Exceptions\n'
'**********\n'
'\n'
'Exceptions are a means of breaking out of the normal flow of '
'control\n'
'of a code block in order to handle errors or other '
'exceptional\n'
'conditions. An exception is *raised* at the point where the '
'error is\n'
'detected; it may be *handled* by the surrounding code block or '
'by any\n'
'code block that directly or indirectly invoked the code block '
'where\n'
'the error occurred.\n'
'\n'
'The Python interpreter raises an exception when it detects a '
'run-time\n'
'error (such as division by zero). A Python program can also\n'
'explicitly raise an exception with the "raise" statement. '
'Exception\n'
'handlers are specified with the "try" … "except" statement. '
'The\n'
'"finally" clause of such a statement can be used to specify '
'cleanup\n'
'code which does not handle the exception, but is executed '
'whether an\n'
'exception occurred or not in the preceding code.\n'
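'\n'
'For example (illustrative):\n'
'\n'
'   try:\n'
'       result = 1 / x\n'
'   except ZeroDivisionError:\n'
'       result = 0\n'
'   finally:\n'
'       print("attempted division")\n'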
'\n'
'Python uses the “termination” model of error handling: an '
'exception\n'
'handler can find out what happened and continue execution at '
'an outer\n'
'level, but it cannot repair the cause of the error and retry '
'the\n'
'failing operation (except by re-entering the offending piece '
'of code\n'
'from the top).\n'
'\n'
'When an exception is not handled at all, the interpreter '
'terminates\n'
'execution of the program, or returns to its interactive main '
'loop. In\n'
'either case, it prints a stack backtrace, except when the '
'exception is\n'
'"SystemExit".\n'
'\n'
'Exceptions are identified by class instances. The "except" '
'clause is\n'
'selected depending on the class of the instance: it must '
'reference the\n'
'class of the instance or a base class thereof. The instance '
'can be\n'
'received by the handler and can carry additional information '
'about the\n'
'exceptional condition.\n'
'\n'
'Note: Exception messages are not part of the Python API. '
'Their\n'
' contents may change from one version of Python to the next '
'without\n'
' warning and should not be relied on by code which will run '
'under\n'
' multiple versions of the interpreter.\n'
'\n'
'See also the description of the "try" statement in section The '
'try\n'
'statement and "raise" statement in section The raise '
'statement.\n'
'\n'
'-[ Footnotes ]-\n'
'\n'
'[1] This limitation occurs because the code that is executed '
'by\n'
' these operations is not available at the time the module '
'is\n'
' compiled.\n',
'execmodel': 'Execution model\n'
'***************\n'
'\n'
'\n'
'Structure of a program\n'
'======================\n'
'\n'
'A Python program is constructed from code blocks. A *block* is '
'a piece\n'
'of Python program text that is executed as a unit. The '
'following are\n'
'blocks: a module, a function body, and a class definition. '
'Each\n'
'command typed interactively is a block. A script file (a file '
'given\n'
'as standard input to the interpreter or specified as a command '
'line\n'
'argument to the interpreter) is a code block. A script command '
'(a\n'
'command specified on the interpreter command line with the '
'‘**-c**’\n'
'option) is a code block. The string argument passed to the '
'built-in\n'
'functions "eval()" and "exec()" is a code block.\n'
'\n'
'A code block is executed in an *execution frame*. A frame '
'contains\n'
'some administrative information (used for debugging) and '
'determines\n'
'where and how execution continues after the code block’s '
'execution has\n'
'completed.\n'
'\n'
'\n'
'Naming and binding\n'
'==================\n'
'\n'
'\n'
'Binding of names\n'
'----------------\n'
'\n'
'*Names* refer to objects. Names are introduced by name '
'binding\n'
'operations.\n'
'\n'
'The following constructs bind names: formal parameters to '
'functions,\n'
'"import" statements, class and function definitions (these bind '
'the\n'
'class or function name in the defining block), and targets that '
'are\n'
'identifiers if occurring in an assignment, "for" loop header, '
'or after\n'
'"as" in a "with" statement or "except" clause. The "import" '
'statement\n'
'of the form "from ... import *" binds all names defined in the\n'
'imported module, except those beginning with an underscore. '
'This form\n'
'may only be used at the module level.\n'
'\n'
'A target occurring in a "del" statement is also considered '
'bound for\n'
'this purpose (though the actual semantics are to unbind the '
'name).\n'
'\n'
'Each assignment or import statement occurs within a block '
'defined by a\n'
'class or function definition or at the module level (the '
'top-level\n'
'code block).\n'
'\n'
'If a name is bound in a block, it is a local variable of that '
'block,\n'
'unless declared as "nonlocal" or "global". If a name is bound '
'at the\n'
'module level, it is a global variable. (The variables of the '
'module\n'
'code block are local and global.) If a variable is used in a '
'code\n'
'block but not defined there, it is a *free variable*.\n'
'\n'
'Each occurrence of a name in the program text refers to the '
'*binding*\n'
'of that name established by the following name resolution '
'rules.\n'
'\n'
'\n'
'Resolution of names\n'
'-------------------\n'
'\n'
'A *scope* defines the visibility of a name within a block. If '
'a local\n'
'variable is defined in a block, its scope includes that block. '
'If the\n'
'definition occurs in a function block, the scope extends to any '
'blocks\n'
'contained within the defining one, unless a contained block '
'introduces\n'
'a different binding for the name.\n'
'\n'
'When a name is used in a code block, it is resolved using the '
'nearest\n'
'enclosing scope. The set of all such scopes visible to a code '
'block\n'
'is called the block’s *environment*.\n'
'\n'
'When a name is not found at all, a "NameError" exception is '
'raised. If\n'
'the current scope is a function scope, and the name refers to a '
'local\n'
'variable that has not yet been bound to a value at the point '
'where the\n'
'name is used, an "UnboundLocalError" exception is raised.\n'
'"UnboundLocalError" is a subclass of "NameError".\n'
'\n'
'If a name binding operation occurs anywhere within a code '
'block, all\n'
'uses of the name within the block are treated as references to '
'the\n'
'current block. This can lead to errors when a name is used '
'within a\n'
'block before it is bound. This rule is subtle. Python lacks\n'
'declarations and allows name binding operations to occur '
'anywhere\n'
'within a code block. The local variables of a code block can '
'be\n'
'determined by scanning the entire text of the block for name '
'binding\n'
'operations.\n'
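'\n'
'For example (illustrative):\n'
'\n'
'   x = 10\n'
'   def f():\n'
'       print(x)   # UnboundLocalError when f() runs: the\n'
'       x = 20     # assignment below makes x local throughout\n'
'   f()\n'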
'\n'
'If the "global" statement occurs within a block, all uses of '
'the name\n'
'specified in the statement refer to the binding of that name in '
'the\n'
'top-level namespace. Names are resolved in the top-level '
'namespace by\n'
'searching the global namespace, i.e. the namespace of the '
'module\n'
'containing the code block, and the builtins namespace, the '
'namespace\n'
'of the module "builtins". The global namespace is searched '
'first. If\n'
'the name is not found there, the builtins namespace is '
'searched. The\n'
'"global" statement must precede all uses of the name.\n'
'\n'
'The "global" statement has the same scope as a name binding '
'operation\n'
'in the same block. If the nearest enclosing scope for a free '
'variable\n'
'contains a global statement, the free variable is treated as a '
'global.\n'
'\n'
'The "nonlocal" statement causes corresponding names to refer '
'to\n'
'previously bound variables in the nearest enclosing function '
'scope.\n'
'"SyntaxError" is raised at compile time if the given name does '
'not\n'
'exist in any enclosing function scope.\n'
'\n'
'The namespace for a module is automatically created the first '
'time a\n'
'module is imported. The main module for a script is always '
'called\n'
'"__main__".\n'
'\n'
'Class definition blocks and arguments to "exec()" and "eval()" '
'are\n'
'special in the context of name resolution. A class definition '
'is an\n'
'executable statement that may use and define names. These '
'references\n'
'follow the normal rules for name resolution with an exception '
'that\n'
'unbound local variables are looked up in the global namespace. '
'The\n'
'namespace of the class definition becomes the attribute '
'dictionary of\n'
'the class. The scope of names defined in a class block is '
'limited to\n'
'the class block; it does not extend to the code blocks of '
'methods –\n'
'this includes comprehensions and generator expressions since '
'they are\n'
'implemented using a function scope. This means that the '
'following\n'
'will fail:\n'
'\n'
' class A:\n'
' a = 42\n'
' b = list(a + i for i in range(10))\n'
'\n'
'\n'
'Builtins and restricted execution\n'
'---------------------------------\n'
'\n'
'**CPython implementation detail:** Users should not touch\n'
'"__builtins__"; it is strictly an implementation detail. '
'Users\n'
'wanting to override values in the builtins namespace should '
'"import"\n'
'the "builtins" module and modify its attributes appropriately.\n'
'\n'
'The builtins namespace associated with the execution of a code '
'block\n'
'is actually found by looking up the name "__builtins__" in its '
'global\n'
'namespace; this should be a dictionary or a module (in the '
'latter case\n'
'the module’s dictionary is used). By default, when in the '
'"__main__"\n'
'module, "__builtins__" is the built-in module "builtins"; when '
'in any\n'
'other module, "__builtins__" is an alias for the dictionary of '
'the\n'
'"builtins" module itself.\n'
'\n'
'\n'
'Interaction with dynamic features\n'
'---------------------------------\n'
'\n'
'Name resolution of free variables occurs at runtime, not at '
'compile\n'
'time. This means that the following code will print 42:\n'
'\n'
' i = 10\n'
' def f():\n'
' print(i)\n'
' i = 42\n'
' f()\n'
'\n'
'The "eval()" and "exec()" functions do not have access to the '
'full\n'
'environment for resolving names. Names may be resolved in the '
'local\n'
'and global namespaces of the caller. Free variables are not '
'resolved\n'
'in the nearest enclosing namespace, but in the global '
'namespace. [1]\n'
'The "exec()" and "eval()" functions have optional arguments to\n'
'override the global and local namespace. If only one namespace '
'is\n'
'specified, it is used for both.\n'
'\n'
'\n'
'Exceptions\n'
'==========\n'
'\n'
'Exceptions are a means of breaking out of the normal flow of '
'control\n'
'of a code block in order to handle errors or other exceptional\n'
'conditions. An exception is *raised* at the point where the '
'error is\n'
'detected; it may be *handled* by the surrounding code block or '
'by any\n'
'code block that directly or indirectly invoked the code block '
'where\n'
'the error occurred.\n'
'\n'
'The Python interpreter raises an exception when it detects a '
'run-time\n'
'error (such as division by zero). A Python program can also\n'
'explicitly raise an exception with the "raise" statement. '
'Exception\n'
'handlers are specified with the "try" … "except" statement. '
'The\n'
'"finally" clause of such a statement can be used to specify '
'cleanup\n'
'code which does not handle the exception, but is executed '
'whether an\n'
'exception occurred or not in the preceding code.\n'
'\n'
'Python uses the “termination” model of error handling: an '
'exception\n'
'handler can find out what happened and continue execution at an '
'outer\n'
'level, but it cannot repair the cause of the error and retry '
'the\n'
'failing operation (except by re-entering the offending piece of '
'code\n'
'from the top).\n'
'\n'
'When an exception is not handled at all, the interpreter '
'terminates\n'
'execution of the program, or returns to its interactive main '
'loop. In\n'
'either case, it prints a stack backtrace, except when the '
'exception is\n'
'"SystemExit".\n'
'\n'
'Exceptions are identified by class instances. The "except" '
'clause is\n'
'selected depending on the class of the instance: it must '
'reference the\n'
'class of the instance or a base class thereof. The instance '
'can be\n'
'received by the handler and can carry additional information '
'about the\n'
'exceptional condition.\n'
'\n'
'Note: Exception messages are not part of the Python API. '
'Their\n'
' contents may change from one version of Python to the next '
'without\n'
' warning and should not be relied on by code which will run '
'under\n'
' multiple versions of the interpreter.\n'
'\n'
'See also the description of the "try" statement in section The '
'try\n'
'statement and "raise" statement in section The raise '
'statement.\n'
'\n'
'-[ Footnotes ]-\n'
'\n'
'[1] This limitation occurs because the code that is executed '
'by\n'
' these operations is not available at the time the module '
'is\n'
' compiled.\n',
'exprlists': 'Expression lists\n'
'****************\n'
'\n'
' expression_list ::= expression ( "," expression )* [","]\n'
' starred_list ::= starred_item ( "," starred_item )* '
'[","]\n'
' starred_expression ::= expression | ( starred_item "," )* '
'[starred_item]\n'
' starred_item ::= expression | "*" or_expr\n'
'\n'
'Except when part of a list or set display, an expression list\n'
'containing at least one comma yields a tuple. The length of '
'the tuple\n'
'is the number of expressions in the list. The expressions are\n'
'evaluated from left to right.\n'
'\n'
'An asterisk "*" denotes *iterable unpacking*. Its operand must '
'be an\n'
'*iterable*. The iterable is expanded into a sequence of items, '
'which\n'
'are included in the new tuple, list, or set, at the site of '
'the\n'
'unpacking.\n'
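'\n'
'For example (illustrative):\n'
'\n'
'   rest = [2, 3]\n'
'   t = 1, *rest      # the tuple (1, 2, 3)\n'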
'\n'
'New in version 3.5: Iterable unpacking in expression lists, '
'originally\n'
'proposed by **PEP 448**.\n'
'\n'
'The trailing comma is required only to create a single tuple '
'(a.k.a. a\n'
'*singleton*); it is optional in all other cases. A single '
'expression\n'
'without a trailing comma doesn’t create a tuple, but rather '
'yields the\n'
'value of that expression. (To create an empty tuple, use an '
'empty pair\n'
'of parentheses: "()".)\n',
'floating': 'Floating point literals\n'
'***********************\n'
'\n'
'Floating point literals are described by the following lexical\n'
'definitions:\n'
'\n'
' floatnumber ::= pointfloat | exponentfloat\n'
' pointfloat ::= [digitpart] fraction | digitpart "."\n'
' exponentfloat ::= (digitpart | pointfloat) exponent\n'
' digitpart ::= digit (["_"] digit)*\n'
' fraction ::= "." digitpart\n'
' exponent ::= ("e" | "E") ["+" | "-"] digitpart\n'
'\n'
'Note that the integer and exponent parts are always interpreted '
'using\n'
'radix 10. For example, "077e010" is legal, and denotes the same '
'number\n'
'as "77e10". The allowed range of floating point literals is\n'
'implementation-dependent. As in integer literals, underscores '
'are\n'
'supported for digit grouping.\n'
'\n'
'Some examples of floating point literals:\n'
'\n'
' 3.14 10. .001 1e100 3.14e-10 0e0 '
'3.14_15_93\n'
'\n'
'Changed in version 3.6: Underscores are now allowed for '
'grouping\n'
'purposes in literals.\n',
'for': 'The "for" statement\n'
'*******************\n'
'\n'
'The "for" statement is used to iterate over the elements of a '
'sequence\n'
'(such as a string, tuple or list) or other iterable object:\n'
'\n'
' for_stmt ::= "for" target_list "in" expression_list ":" suite\n'
' ["else" ":" suite]\n'
'\n'
'The expression list is evaluated once; it should yield an iterable\n'
'object. An iterator is created for the result of the\n'
'"expression_list". The suite is then executed once for each item\n'
'provided by the iterator, in the order returned by the iterator. '
'Each\n'
'item in turn is assigned to the target list using the standard rules\n'
'for assignments (see Assignment statements), and then the suite is\n'
'executed. When the items are exhausted (which is immediately when '
'the\n'
'sequence is empty or an iterator raises a "StopIteration" '
'exception),\n'
'the suite in the "else" clause, if present, is executed, and the '
'loop\n'
'terminates.\n'
'\n'
'A "break" statement executed in the first suite terminates the loop\n'
'without executing the "else" clause’s suite. A "continue" statement\n'
'executed in the first suite skips the rest of the suite and '
'continues\n'
'with the next item, or with the "else" clause if there is no next\n'
'item.\n'
'\n'
'The for-loop makes assignments to the variable(s) in the target '
'list.\n'
'This overwrites all previous assignments to those variables '
'including\n'
'those made in the suite of the for-loop:\n'
'\n'
' for i in range(10):\n'
' print(i)\n'
' i = 5 # this will not affect the for-loop\n'
' # because i will be overwritten with the '
'next\n'
' # index in the range\n'
'\n'
'Names in the target list are not deleted when the loop is finished,\n'
'but if the sequence is empty, they will not have been assigned to at\n'
'all by the loop. Hint: the built-in function "range()" returns a\n'
'sequence of integers suitable to emulate the effect of Pascal’s "for '
'i\n'
':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n'
'\n'
'Note: There is a subtlety when the sequence is being modified by the\n'
' loop (this can only occur for mutable sequences, i.e. lists). An\n'
' internal counter is used to keep track of which item is used next,\n'
' and this is incremented on each iteration. When this counter has\n'
' reached the length of the sequence the loop terminates. This '
'means\n'
' that if the suite deletes the current (or a previous) item from '
'the\n'
' sequence, the next item will be skipped (since it gets the index '
'of\n'
' the current item which has already been treated). Likewise, if '
'the\n'
' suite inserts an item in the sequence before the current item, the\n'
' current item will be treated again the next time through the loop.\n'
' This can lead to nasty bugs that can be avoided by making a\n'
' temporary copy using a slice of the whole sequence, e.g.,\n'
'\n'
' for x in a[:]:\n'
' if x < 0: a.remove(x)\n',
'formatstrings': 'Format String Syntax\n'
'********************\n'
'\n'
'The "str.format()" method and the "Formatter" class share '
'the same\n'
'syntax for format strings (although in the case of '
'"Formatter",\n'
'subclasses can define their own format string syntax). The '
'syntax is\n'
'related to that of formatted string literals, but there '
'are\n'
'differences.\n'
'\n'
'Format strings contain “replacement fields” surrounded by '
'curly braces\n'
'"{}". Anything that is not contained in braces is '
'considered literal\n'
'text, which is copied unchanged to the output. If you need '
'to include\n'
'a brace character in the literal text, it can be escaped by '
'doubling:\n'
'"{{" and "}}".\n'
'\n'
'The grammar for a replacement field is as follows:\n'
'\n'
' replacement_field ::= "{" [field_name] ["!" '
'conversion] [":" format_spec] "}"\n'
' field_name ::= arg_name ("." attribute_name | '
'"[" element_index "]")*\n'
' arg_name ::= [identifier | digit+]\n'
' attribute_name ::= identifier\n'
' element_index ::= digit+ | index_string\n'
' index_string ::= <any source character except '
'"]"> +\n'
' conversion ::= "r" | "s" | "a"\n'
' format_spec ::= <described in the next '
'section>\n'
'\n'
'In less formal terms, the replacement field can start with '
'a\n'
'*field_name* that specifies the object whose value is to be '
'formatted\n'
'and inserted into the output instead of the replacement '
'field. The\n'
'*field_name* is optionally followed by a *conversion* '
'field, which is\n'
'preceded by an exclamation point "\'!\'", and a '
'*format_spec*, which is\n'
'preceded by a colon "\':\'". These specify a non-default '
'format for the\n'
'replacement value.\n'
'\n'
'See also the Format Specification Mini-Language section.\n'
'\n'
'The *field_name* itself begins with an *arg_name* that is '
'either a\n'
'number or a keyword. If it’s a number, it refers to a '
'positional\n'
'argument, and if it’s a keyword, it refers to a named '
'keyword\n'
'argument. If the numerical arg_names in a format string '
'are 0, 1, 2,\n'
'… in sequence, they can all be omitted (not just some) and '
'the numbers\n'
'0, 1, 2, … will be automatically inserted in that order. '
'Because\n'
'*arg_name* is not quote-delimited, it is not possible to '
'specify\n'
'arbitrary dictionary keys (e.g., the strings "\'10\'" or '
'"\':-]\'") within\n'
'a format string. The *arg_name* can be followed by any '
'number of index\n'
'or attribute expressions. An expression of the form '
'"\'.name\'" selects\n'
'the named attribute using "getattr()", while an expression '
'of the form\n'
'"\'[index]\'" does an index lookup using "__getitem__()".\n'
'\n'
'Changed in version 3.1: The positional argument specifiers '
'can be\n'
'omitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n'
'\n'
'Some simple format string examples:\n'
'\n'
' "First, thou shalt count to {0}" # References first '
'positional argument\n'
' "Bring me a {}" # Implicitly '
'references the first positional argument\n'
' "From {} to {}" # Same as "From {0} to '
'{1}"\n'
' "My quest is {name}" # References keyword '
"argument 'name'\n"
' "Weight in tons {0.weight}" # \'weight\' attribute '
'of first positional arg\n'
' "Units destroyed: {players[0]}" # First element of '
"keyword argument 'players'.\n"
'\n'
'The *conversion* field causes a type coercion before '
'formatting.\n'
'Normally, the job of formatting a value is done by the '
'"__format__()"\n'
'method of the value itself. However, in some cases it is '
'desirable to\n'
'force a type to be formatted as a string, overriding its '
'own\n'
'definition of formatting. By converting the value to a '
'string before\n'
'calling "__format__()", the normal formatting logic is '
'bypassed.\n'
'\n'
'Three conversion flags are currently supported: "\'!s\'" '
'which calls\n'
'"str()" on the value, "\'!r\'" which calls "repr()" and '
'"\'!a\'" which\n'
'calls "ascii()".\n'
'\n'
'Some examples:\n'
'\n'
' "Harold\'s a clever {0!s}" # Calls str() on the '
'argument first\n'
' "Bring out the holy {name!r}" # Calls repr() on the '
'argument first\n'
' "More {!a}" # Calls ascii() on the '
'argument first\n'
'\n'
'The *format_spec* field contains a specification of how the '
'value\n'
'should be presented, including such details as field width, '
'alignment,\n'
'padding, decimal precision and so on. Each value type can '
'define its\n'
'own “formatting mini-language” or interpretation of the '
'*format_spec*.\n'
'\n'
'Most built-in types support a common formatting '
'mini-language, which\n'
'is described in the next section.\n'
'\n'
'A *format_spec* field can also include nested replacement '
'fields\n'
'within it. These nested replacement fields may contain a '
'field name,\n'
'conversion flag and format specification, but deeper '
'nesting is not\n'
'allowed. The replacement fields within the format_spec '
'are\n'
'substituted before the *format_spec* string is interpreted. '
'This\n'
'allows the formatting of a value to be dynamically '
'specified.\n'
'\n'
'See the Format examples section for some examples.\n'
'\n'
'\n'
'Format Specification Mini-Language\n'
'==================================\n'
'\n'
'“Format specifications” are used within replacement fields '
'contained\n'
'within a format string to define how individual values are '
'presented\n'
'(see Format String Syntax and Formatted string literals). '
'They can\n'
'also be passed directly to the built-in "format()" '
'function. Each\n'
'formattable type may define how the format specification is '
'to be\n'
'interpreted.\n'
'\n'
'Most built-in types implement the following options for '
'format\n'
'specifications, although some of the formatting options are '
'only\n'
'supported by the numeric types.\n'
'\n'
'A general convention is that an empty format string ("""") '
'produces\n'
'the same result as if you had called "str()" on the value. '
'A non-empty\n'
'format string typically modifies the result.\n'
'\n'
'The general form of a *standard format specifier* is:\n'
'\n'
' format_spec ::= '
'[[fill]align][sign][#][0][width][grouping_option][.precision][type]\n'
' fill ::= <any character>\n'
' align ::= "<" | ">" | "=" | "^"\n'
' sign ::= "+" | "-" | " "\n'
' width ::= digit+\n'
' grouping_option ::= "_" | ","\n'
' precision ::= digit+\n'
' type ::= "b" | "c" | "d" | "e" | "E" | "f" | '
'"F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n'
'\n'
'If a valid *align* value is specified, it can be preceded '
'by a *fill*\n'
'character that can be any character and defaults to a space '
'if\n'
'omitted. It is not possible to use a literal curly brace '
'(“"{"” or\n'
'“"}"”) as the *fill* character in a formatted string '
'literal or when\n'
'using the "str.format()" method. However, it is possible '
'to insert a\n'
'curly brace with a nested replacement field. This '
'limitation doesn’t\n'
'affect the "format()" function.\n'
'\n'
'The meaning of the various alignment options is as '
'follows:\n'
'\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | Option | '
'Meaning '
'|\n'
' '
'+===========+============================================================+\n'
' | "\'<\'" | Forces the field to be left-aligned '
'within the available |\n'
' | | space (this is the default for most '
'objects). |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'>\'" | Forces the field to be right-aligned '
'within the available |\n'
' | | space (this is the default for '
'numbers). |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'=\'" | Forces the padding to be placed after '
'the sign (if any) |\n'
' | | but before the digits. This is used for '
'printing fields |\n'
' | | in the form ‘+000000120’. This alignment '
'option is only |\n'
' | | valid for numeric types. It becomes the '
'default when ‘0’ |\n'
' | | immediately precedes the field '
'width. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'^\'" | Forces the field to be centered within '
'the available |\n'
' | | '
'space. '
'|\n'
' '
'+-----------+------------------------------------------------------------+\n'
'\n'
'Note that unless a minimum field width is defined, the '
'field width\n'
'will always be the same size as the data to fill it, so '
'that the\n'
'alignment option has no meaning in this case.\n'
'\n'
'The *sign* option is only valid for number types, and can '
'be one of\n'
'the following:\n'
'\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | Option | '
'Meaning '
'|\n'
' '
'+===========+============================================================+\n'
' | "\'+\'" | indicates that a sign should be used for '
'both positive as |\n'
' | | well as negative '
'numbers. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'-\'" | indicates that a sign should be used '
'only for negative |\n'
' | | numbers (this is the default '
'behavior). |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | space | indicates that a leading space should be '
'used on positive |\n'
' | | numbers, and a minus sign on negative '
'numbers. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
'\n'
'The "\'#\'" option causes the “alternate form” to be used '
'for the\n'
'conversion. The alternate form is defined differently for '
'different\n'
'types. This option is only valid for integer, float, '
'complex and\n'
'Decimal types. For integers, when binary, octal, or '
'hexadecimal output\n'
                 'is used, this option adds the respective prefix "\'0b\'", '
'"\'0o\'", or\n'
'"\'0x\'" to the output value. For floats, complex and '
'Decimal the\n'
'alternate form causes the result of the conversion to '
'always contain a\n'
'decimal-point character, even if no digits follow it. '
'Normally, a\n'
'decimal-point character appears in the result of these '
'conversions\n'
'only if a digit follows it. In addition, for "\'g\'" and '
'"\'G\'"\n'
'conversions, trailing zeros are not removed from the '
'result.\n'
'\n'
'The "\',\'" option signals the use of a comma for a '
'thousands separator.\n'
'For a locale aware separator, use the "\'n\'" integer '
'presentation type\n'
'instead.\n'
'\n'
'Changed in version 3.1: Added the "\',\'" option (see also '
'**PEP 378**).\n'
'\n'
'The "\'_\'" option signals the use of an underscore for a '
'thousands\n'
'separator for floating point presentation types and for '
'integer\n'
'presentation type "\'d\'". For integer presentation types '
'"\'b\'", "\'o\'",\n'
'"\'x\'", and "\'X\'", underscores will be inserted every 4 '
'digits. For\n'
'other presentation types, specifying this option is an '
'error.\n'
'\n'
'Changed in version 3.6: Added the "\'_\'" option (see also '
'**PEP 515**).\n'
'\n'
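                 'For instance, a small sketch of the "\'_\'" separator\n'
                 '(digit groups of three for "\'d\'", groups of four for\n'
                 '"\'x\'"):\n'
                 '\n'
                 "   >>> '{:_}'.format(1000000)\n"
                 "   '1_000_000'\n"
                 "   >>> '{:_x}'.format(0xFFFFFFFF)\n"
                 "   'ffff_ffff'\n"
                 '\n'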
'*width* is a decimal integer defining the minimum field '
'width. If not\n'
'specified, then the field width will be determined by the '
'content.\n'
'\n'
'When no explicit alignment is given, preceding the *width* '
'field by a\n'
'zero ("\'0\'") character enables sign-aware zero-padding '
'for numeric\n'
'types. This is equivalent to a *fill* character of "\'0\'" '
'with an\n'
'*alignment* type of "\'=\'".\n'
'\n'
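                 'For instance, a small sketch of sign-aware zero-padding:\n'
                 '\n'
                 "   >>> '{:08.3f}'.format(-3.14)\n"
                 "   '-003.140'\n"
                 '\n'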
'The *precision* is a decimal number indicating how many '
'digits should\n'
'be displayed after the decimal point for a floating point '
'value\n'
'formatted with "\'f\'" and "\'F\'", or before and after the '
'decimal point\n'
'for a floating point value formatted with "\'g\'" or '
'"\'G\'". For non-\n'
'number types the field indicates the maximum field size - '
'in other\n'
'words, how many characters will be used from the field '
'content. The\n'
'*precision* is not allowed for integer values.\n'
'\n'
'Finally, the *type* determines how the data should be '
'presented.\n'
'\n'
'The available string presentation types are:\n'
'\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | Type | '
'Meaning '
'|\n'
' '
'+===========+============================================================+\n'
' | "\'s\'" | String format. This is the default type '
'for strings and |\n'
' | | may be '
'omitted. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | None | The same as '
'"\'s\'". |\n'
' '
'+-----------+------------------------------------------------------------+\n'
'\n'
'The available integer presentation types are:\n'
'\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | Type | '
'Meaning '
'|\n'
' '
'+===========+============================================================+\n'
' | "\'b\'" | Binary format. Outputs the number in '
'base 2. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'c\'" | Character. Converts the integer to the '
'corresponding |\n'
' | | unicode character before '
'printing. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'d\'" | Decimal Integer. Outputs the number in '
'base 10. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'o\'" | Octal format. Outputs the number in base '
'8. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'x\'" | Hex format. Outputs the number in base '
'16, using lower- |\n'
' | | case letters for the digits above '
'9. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'X\'" | Hex format. Outputs the number in base '
'16, using upper- |\n'
' | | case letters for the digits above '
'9. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'n\'" | Number. This is the same as "\'d\'", '
'except that it uses the |\n'
' | | current locale setting to insert the '
'appropriate number |\n'
' | | separator '
'characters. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | None | The same as '
'"\'d\'". |\n'
' '
'+-----------+------------------------------------------------------------+\n'
'\n'
'In addition to the above presentation types, integers can '
'be formatted\n'
'with the floating point presentation types listed below '
'(except "\'n\'"\n'
'and "None"). When doing so, "float()" is used to convert '
'the integer\n'
'to a floating point number before formatting.\n'
'\n'
'The available presentation types for floating point and '
'decimal values\n'
'are:\n'
'\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | Type | '
'Meaning '
'|\n'
' '
'+===========+============================================================+\n'
' | "\'e\'" | Exponent notation. Prints the number in '
'scientific |\n'
' | | notation using the letter ‘e’ to indicate '
'the exponent. |\n'
' | | The default precision is '
'"6". |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'E\'" | Exponent notation. Same as "\'e\'" '
'except it uses an upper |\n'
' | | case ‘E’ as the separator '
'character. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'f\'" | Fixed point. Displays the number as a '
'fixed-point number. |\n'
' | | The default precision is '
'"6". |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'F\'" | Fixed point. Same as "\'f\'", but '
'converts "nan" to "NAN" |\n'
' | | and "inf" to '
'"INF". |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'g\'" | General format. For a given precision '
'"p >= 1", this |\n'
' | | rounds the number to "p" significant '
'digits and then |\n'
' | | formats the result in either fixed-point '
'format or in |\n'
' | | scientific notation, depending on its '
'magnitude. The |\n'
' | | precise rules are as follows: suppose that '
'the result |\n'
' | | formatted with presentation type "\'e\'" '
'and precision "p-1" |\n'
' | | would have exponent "exp". Then if "-4 <= '
'exp < p", the |\n'
' | | number is formatted with presentation type '
'"\'f\'" and |\n'
' | | precision "p-1-exp". Otherwise, the '
'number is formatted |\n'
' | | with presentation type "\'e\'" and '
'precision "p-1". In both |\n'
' | | cases insignificant trailing zeros are '
'removed from the |\n'
' | | significand, and the decimal point is also '
'removed if |\n'
' | | there are no remaining digits following '
'it. Positive and |\n'
' | | negative infinity, positive and negative '
'zero, and nans, |\n'
' | | are formatted as "inf", "-inf", "0", "-0" '
'and "nan" |\n'
' | | respectively, regardless of the '
'precision. A precision of |\n'
' | | "0" is treated as equivalent to a '
'precision of "1". The |\n'
' | | default precision is '
'"6". |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'G\'" | General format. Same as "\'g\'" except '
'switches to "\'E\'" if |\n'
' | | the number gets too large. The '
'representations of infinity |\n'
' | | and NaN are uppercased, '
'too. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'n\'" | Number. This is the same as "\'g\'", '
'except that it uses the |\n'
' | | current locale setting to insert the '
'appropriate number |\n'
' | | separator '
'characters. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | "\'%\'" | Percentage. Multiplies the number by 100 '
'and displays in |\n'
' | | fixed ("\'f\'") format, followed by a '
'percent sign. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
' | None | Similar to "\'g\'", except that '
'fixed-point notation, when |\n'
' | | used, has at least one digit past the '
'decimal point. The |\n'
' | | default precision is as high as needed to '
'represent the |\n'
' | | particular value. The overall effect is to '
'match the |\n'
' | | output of "str()" as altered by the other '
'format |\n'
' | | '
'modifiers. '
'|\n'
' '
'+-----------+------------------------------------------------------------+\n'
'\n'
'\n'
'Format examples\n'
'===============\n'
'\n'
'This section contains examples of the "str.format()" syntax '
'and\n'
'comparison with the old "%"-formatting.\n'
'\n'
'In most of the cases the syntax is similar to the old '
'"%"-formatting,\n'
'with the addition of the "{}" and with ":" used instead of '
'"%". For\n'
'example, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n'
'\n'
'The new format syntax also supports new and different '
'options, shown\n'
                 'in the following examples.\n'
'\n'
'Accessing arguments by position:\n'
'\n'
" >>> '{0}, {1}, {2}'.format('a', 'b', 'c')\n"
" 'a, b, c'\n"
" >>> '{}, {}, {}'.format('a', 'b', 'c') # 3.1+ only\n"
" 'a, b, c'\n"
" >>> '{2}, {1}, {0}'.format('a', 'b', 'c')\n"
" 'c, b, a'\n"
" >>> '{2}, {1}, {0}'.format(*'abc') # unpacking "
'argument sequence\n'
" 'c, b, a'\n"
" >>> '{0}{1}{0}'.format('abra', 'cad') # arguments' "
'indices can be repeated\n'
" 'abracadabra'\n"
'\n'
'Accessing arguments by name:\n'
'\n'
" >>> 'Coordinates: {latitude}, "
"{longitude}'.format(latitude='37.24N', "
"longitude='-115.81W')\n"
" 'Coordinates: 37.24N, -115.81W'\n"
" >>> coord = {'latitude': '37.24N', 'longitude': "
"'-115.81W'}\n"
" >>> 'Coordinates: {latitude}, "
"{longitude}'.format(**coord)\n"
" 'Coordinates: 37.24N, -115.81W'\n"
'\n'
'Accessing arguments’ attributes:\n'
'\n'
' >>> c = 3-5j\n'
" >>> ('The complex number {0} is formed from the real "
"part {0.real} '\n"
" ... 'and the imaginary part {0.imag}.').format(c)\n"
" 'The complex number (3-5j) is formed from the real part "
"3.0 and the imaginary part -5.0.'\n"
' >>> class Point:\n'
' ... def __init__(self, x, y):\n'
' ... self.x, self.y = x, y\n'
' ... def __str__(self):\n'
" ... return 'Point({self.x}, "
"{self.y})'.format(self=self)\n"
' ...\n'
' >>> str(Point(4, 2))\n'
" 'Point(4, 2)'\n"
'\n'
'Accessing arguments’ items:\n'
'\n'
' >>> coord = (3, 5)\n'
" >>> 'X: {0[0]}; Y: {0[1]}'.format(coord)\n"
" 'X: 3; Y: 5'\n"
'\n'
'Replacing "%s" and "%r":\n'
'\n'
' >>> "repr() shows quotes: {!r}; str() doesn\'t: '
'{!s}".format(\'test1\', \'test2\')\n'
' "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n'
'\n'
'Aligning the text and specifying a width:\n'
'\n'
" >>> '{:<30}'.format('left aligned')\n"
" 'left aligned '\n"
" >>> '{:>30}'.format('right aligned')\n"
" ' right aligned'\n"
" >>> '{:^30}'.format('centered')\n"
" ' centered '\n"
" >>> '{:*^30}'.format('centered') # use '*' as a fill "
'char\n'
" '***********centered***********'\n"
'\n'
'Replacing "%+f", "%-f", and "% f" and specifying a sign:\n'
'\n'
" >>> '{:+f}; {:+f}'.format(3.14, -3.14) # show it "
'always\n'
" '+3.140000; -3.140000'\n"
" >>> '{: f}; {: f}'.format(3.14, -3.14) # show a space "
'for positive numbers\n'
" ' 3.140000; -3.140000'\n"
" >>> '{:-f}; {:-f}'.format(3.14, -3.14) # show only the "
"minus -- same as '{:f}; {:f}'\n"
" '3.140000; -3.140000'\n"
'\n'
'Replacing "%x" and "%o" and converting the value to '
'different bases:\n'
'\n'
' >>> # format also supports binary numbers\n'
' >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: '
'{0:b}".format(42)\n'
" 'int: 42; hex: 2a; oct: 52; bin: 101010'\n"
' >>> # with 0x, 0o, or 0b as prefix:\n'
' >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: '
'{0:#b}".format(42)\n'
" 'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010'\n"
'\n'
'Using the comma as a thousands separator:\n'
'\n'
" >>> '{:,}'.format(1234567890)\n"
" '1,234,567,890'\n"
'\n'
'Expressing a percentage:\n'
'\n'
' >>> points = 19\n'
' >>> total = 22\n'
" >>> 'Correct answers: {:.2%}'.format(points/total)\n"
" 'Correct answers: 86.36%'\n"
'\n'
'Using type-specific formatting:\n'
'\n'
' >>> import datetime\n'
' >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n'
" >>> '{:%Y-%m-%d %H:%M:%S}'.format(d)\n"
" '2010-07-04 12:15:58'\n"
'\n'
'Nesting arguments and more complex examples:\n'
'\n'
" >>> for align, text in zip('<^>', ['left', 'center', "
"'right']):\n"
" ... '{0:{fill}{align}16}'.format(text, fill=align, "
'align=align)\n'
' ...\n'
" 'left<<<<<<<<<<<<'\n"
" '^^^^^center^^^^^'\n"
" '>>>>>>>>>>>right'\n"
' >>>\n'
' >>> octets = [192, 168, 0, 1]\n'
" >>> '{:02X}{:02X}{:02X}{:02X}'.format(*octets)\n"
" 'C0A80001'\n"
' >>> int(_, 16)\n'
' 3232235521\n'
' >>>\n'
' >>> width = 5\n'
' >>> for num in range(5,12): #doctest: '
'+NORMALIZE_WHITESPACE\n'
" ... for base in 'dXob':\n"
" ... print('{0:{width}{base}}'.format(num, "
"base=base, width=width), end=' ')\n"
' ... print()\n'
' ...\n'
' 5 5 5 101\n'
' 6 6 6 110\n'
' 7 7 7 111\n'
' 8 8 10 1000\n'
' 9 9 11 1001\n'
' 10 A 12 1010\n'
' 11 B 13 1011\n',
'function': 'Function definitions\n'
'********************\n'
'\n'
'A function definition defines a user-defined function object '
'(see\n'
'section The standard type hierarchy):\n'
'\n'
' funcdef ::= [decorators] "def" funcname "(" '
'[parameter_list] ")" ["->" expression] ":" suite\n'
' decorators ::= decorator+\n'
' decorator ::= "@" dotted_name ["(" '
'[argument_list [","]] ")"] NEWLINE\n'
' dotted_name ::= identifier ("." identifier)*\n'
' parameter_list ::= defparameter ("," defparameter)* '
'["," [parameter_list_starargs]]\n'
' | parameter_list_starargs\n'
' parameter_list_starargs ::= "*" [parameter] ("," '
'defparameter)* ["," ["**" parameter [","]]]\n'
' | "**" parameter [","]\n'
' parameter ::= identifier [":" expression]\n'
' defparameter ::= parameter ["=" expression]\n'
' funcname ::= identifier\n'
'\n'
'A function definition is an executable statement. Its execution '
'binds\n'
'the function name in the current local namespace to a function '
'object\n'
'(a wrapper around the executable code for the function). This\n'
'function object contains a reference to the current global '
'namespace\n'
'as the global namespace to be used when the function is called.\n'
'\n'
'The function definition does not execute the function body; this '
'gets\n'
'executed only when the function is called. [3]\n'
'\n'
'A function definition may be wrapped by one or more *decorator*\n'
'expressions. Decorator expressions are evaluated when the '
'function is\n'
'defined, in the scope that contains the function definition. '
'The\n'
'result must be a callable, which is invoked with the function '
'object\n'
'as the only argument. The returned value is bound to the '
'function name\n'
'instead of the function object. Multiple decorators are applied '
'in\n'
'nested fashion. For example, the following code\n'
'\n'
' @f1(arg)\n'
' @f2\n'
' def func(): pass\n'
'\n'
'is roughly equivalent to\n'
'\n'
' def func(): pass\n'
' func = f1(arg)(f2(func))\n'
'\n'
'except that the original function is not temporarily bound to '
'the name\n'
'"func".\n'
'\n'
'When one or more *parameters* have the form *parameter* "="\n'
'*expression*, the function is said to have “default parameter '
'values.”\n'
'For a parameter with a default value, the corresponding '
'*argument* may\n'
'be omitted from a call, in which case the parameter’s default '
'value is\n'
'substituted. If a parameter has a default value, all following\n'
'parameters up until the “"*"” must also have a default value — '
'this is\n'
'a syntactic restriction that is not expressed by the grammar.\n'
'\n'
'**Default parameter values are evaluated from left to right when '
'the\n'
'function definition is executed.** This means that the '
'expression is\n'
'evaluated once, when the function is defined, and that the same '
'“pre-\n'
'computed” value is used for each call. This is especially '
'important\n'
'to understand when a default parameter is a mutable object, such '
'as a\n'
'list or a dictionary: if the function modifies the object (e.g. '
'by\n'
'appending an item to a list), the default value is in effect '
'modified.\n'
'This is generally not what was intended. A way around this is '
'to use\n'
'"None" as the default, and explicitly test for it in the body of '
'the\n'
'function, e.g.:\n'
'\n'
' def whats_on_the_telly(penguin=None):\n'
' if penguin is None:\n'
' penguin = []\n'
' penguin.append("property of the zoo")\n'
' return penguin\n'
'\n'
'Function call semantics are described in more detail in section '
'Calls.\n'
'A function call always assigns values to all parameters '
'mentioned in\n'
'the parameter list, either from position arguments, from '
'keyword\n'
'arguments, or from default values. If the form “"*identifier"” '
'is\n'
'present, it is initialized to a tuple receiving any excess '
'positional\n'
'parameters, defaulting to the empty tuple. If the form\n'
'“"**identifier"” is present, it is initialized to a new ordered\n'
'mapping receiving any excess keyword arguments, defaulting to a '
'new\n'
'empty mapping of the same type. Parameters after “"*"” or\n'
'“"*identifier"” are keyword-only parameters and may only be '
'passed\n'
            'using keyword arguments.\n'
'\n'
'Parameters may have annotations of the form “": expression"” '
'following\n'
            'the parameter name. Any parameter may have an annotation, even '
'those\n'
'of the form "*identifier" or "**identifier". Functions may '
'have\n'
            'a “return” annotation of the form “"-> expression"” after the '
'parameter\n'
'list. These annotations can be any valid Python expression and '
'are\n'
'evaluated when the function definition is executed. Annotations '
'may\n'
'be evaluated in a different order than they appear in the source '
'code.\n'
'The presence of annotations does not change the semantics of a\n'
'function. The annotation values are available as values of a\n'
'dictionary keyed by the parameters’ names in the '
'"__annotations__"\n'
'attribute of the function object.\n'
'\n'
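            'For example, a small sketch of an annotated function and its\n'
            '"__annotations__" dictionary (the names are illustrative):\n'
            '\n'
            '   def sum_two(a: int, b: int = 0) -> int:\n'
            '       return a + b\n'
            '\n'
            "   # keys are the parameter names plus 'return'\n"
            '   sum_two.__annotations__\n'
            "   # {'a': <class 'int'>, 'b': <class 'int'>, 'return': <class 'int'>}\n"
            '\n'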
'It is also possible to create anonymous functions (functions not '
'bound\n'
'to a name), for immediate use in expressions. This uses lambda\n'
'expressions, described in section Lambdas. Note that the '
'lambda\n'
'expression is merely a shorthand for a simplified function '
'definition;\n'
'a function defined in a “"def"” statement can be passed around '
'or\n'
'assigned to another name just like a function defined by a '
'lambda\n'
'expression. The “"def"” form is actually more powerful since '
'it\n'
'allows the execution of multiple statements and annotations.\n'
'\n'
'**Programmer’s note:** Functions are first-class objects. A '
'“"def"”\n'
'statement executed inside a function definition defines a local\n'
'function that can be returned or passed around. Free variables '
'used\n'
'in the nested function can access the local variables of the '
'function\n'
'containing the def. See section Naming and binding for '
'details.\n'
'\n'
'See also:\n'
'\n'
' **PEP 3107** - Function Annotations\n'
' The original specification for function annotations.\n',
'global': 'The "global" statement\n'
'**********************\n'
'\n'
' global_stmt ::= "global" identifier ("," identifier)*\n'
'\n'
'The "global" statement is a declaration which holds for the '
'entire\n'
'current code block. It means that the listed identifiers are to '
'be\n'
'interpreted as globals. It would be impossible to assign to a '
'global\n'
'variable without "global", although free variables may refer to\n'
'globals without being declared global.\n'
'\n'
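          'For example, a small sketch of rebinding a module-level name\n'
          'from inside a function (the names are illustrative):\n'
          '\n'
          '   counter = 0\n'
          '\n'
          '   def bump():\n'
          '       global counter\n'
          '       counter += 1\n'
          '\n'
          '   bump()   # "counter" is now 1\n'
          '\n'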
'Names listed in a "global" statement must not be used in the same '
'code\n'
'block textually preceding that "global" statement.\n'
'\n'
'Names listed in a "global" statement must not be defined as '
'formal\n'
'parameters or in a "for" loop control target, "class" definition,\n'
'function definition, "import" statement, or variable annotation.\n'
'\n'
'**CPython implementation detail:** The current implementation does '
'not\n'
'enforce some of these restrictions, but programs should not abuse '
'this\n'
'freedom, as future implementations may enforce them or silently '
'change\n'
'the meaning of the program.\n'
'\n'
'**Programmer’s note:** "global" is a directive to the parser. It\n'
'applies only to code parsed at the same time as the "global"\n'
'statement. In particular, a "global" statement contained in a '
'string\n'
'or code object supplied to the built-in "exec()" function does '
'not\n'
'affect the code block *containing* the function call, and code\n'
'contained in such a string is unaffected by "global" statements in '
'the\n'
'code containing the function call. The same applies to the '
'"eval()"\n'
'and "compile()" functions.\n',
'id-classes': 'Reserved classes of identifiers\n'
'*******************************\n'
'\n'
'Certain classes of identifiers (besides keywords) have '
'special\n'
'meanings. These classes are identified by the patterns of '
'leading and\n'
'trailing underscore characters:\n'
'\n'
'"_*"\n'
' Not imported by "from module import *". The special '
'identifier "_"\n'
' is used in the interactive interpreter to store the result '
'of the\n'
' last evaluation; it is stored in the "builtins" module. '
'When not\n'
' in interactive mode, "_" has no special meaning and is not '
'defined.\n'
' See section The import statement.\n'
'\n'
' Note: The name "_" is often used in conjunction with\n'
' internationalization; refer to the documentation for the\n'
' "gettext" module for more information on this '
'convention.\n'
'\n'
'"__*__"\n'
' System-defined names. These names are defined by the '
'interpreter\n'
' and its implementation (including the standard library). '
'Current\n'
' system names are discussed in the Special method names '
'section and\n'
' elsewhere. More will likely be defined in future versions '
'of\n'
' Python. *Any* use of "__*__" names, in any context, that '
'does not\n'
' follow explicitly documented use, is subject to breakage '
'without\n'
' warning.\n'
'\n'
'"__*"\n'
' Class-private names. Names in this category, when used '
'within the\n'
' context of a class definition, are re-written to use a '
'mangled form\n'
' to help avoid name clashes between “private” attributes of '
'base and\n'
' derived classes. See section Identifiers (Names).\n',
'identifiers': 'Identifiers and keywords\n'
'************************\n'
'\n'
'Identifiers (also referred to as *names*) are described by '
'the\n'
'following lexical definitions.\n'
'\n'
'The syntax of identifiers in Python is based on the Unicode '
'standard\n'
'annex UAX-31, with elaboration and changes as defined below; '
'see also\n'
'**PEP 3131** for further details.\n'
'\n'
'Within the ASCII range (U+0001..U+007F), the valid characters '
'for\n'
'identifiers are the same as in Python 2.x: the uppercase and '
'lowercase\n'
'letters "A" through "Z", the underscore "_" and, except for '
'the first\n'
'character, the digits "0" through "9".\n'
'\n'
'Python 3.0 introduces additional characters from outside the '
'ASCII\n'
'range (see **PEP 3131**). For these characters, the '
'classification\n'
'uses the version of the Unicode Character Database as '
'included in the\n'
'"unicodedata" module.\n'
'\n'
'Identifiers are unlimited in length. Case is significant.\n'
'\n'
' identifier ::= xid_start xid_continue*\n'
' id_start ::= <all characters in general categories Lu, '
'Ll, Lt, Lm, Lo, Nl, the underscore, and characters with the '
'Other_ID_Start property>\n'
' id_continue ::= <all characters in id_start, plus '
'characters in the categories Mn, Mc, Nd, Pc and others with '
'the Other_ID_Continue property>\n'
' xid_start ::= <all characters in id_start whose NFKC '
'normalization is in "id_start xid_continue*">\n'
' xid_continue ::= <all characters in id_continue whose NFKC '
'normalization is in "id_continue*">\n'
'\n'
'The Unicode category codes mentioned above stand for:\n'
'\n'
'* *Lu* - uppercase letters\n'
'\n'
'* *Ll* - lowercase letters\n'
'\n'
'* *Lt* - titlecase letters\n'
'\n'
'* *Lm* - modifier letters\n'
'\n'
'* *Lo* - other letters\n'
'\n'
'* *Nl* - letter numbers\n'
'\n'
'* *Mn* - nonspacing marks\n'
'\n'
'* *Mc* - spacing combining marks\n'
'\n'
'* *Nd* - decimal numbers\n'
'\n'
'* *Pc* - connector punctuations\n'
'\n'
'* *Other_ID_Start* - explicit list of characters in '
'PropList.txt to\n'
' support backwards compatibility\n'
'\n'
'* *Other_ID_Continue* - likewise\n'
'\n'
'All identifiers are converted into the normal form NFKC while '
'parsing;\n'
'comparison of identifiers is based on NFKC.\n'
'\n'
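               'For example, a small sketch: an identifier spelled with the\n'
               'single ligature character "ﬁ" (U+FB01) is normalized to the\n'
               'two letters "fi", so both spellings refer to the same name:\n'
               '\n'
               '   >>> ﬁ = 1\n'
               '   >>> fi\n'
               '   1\n'
               '\n'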
'A non-normative HTML file listing all valid identifier '
'characters for\n'
'Unicode 4.1 can be found at https://www.dcl.hpi.uni-\n'
'potsdam.de/home/loewis/table-3131.html.\n'
'\n'
'\n'
'Keywords\n'
'========\n'
'\n'
'The following identifiers are used as reserved words, or '
'*keywords* of\n'
'the language, and cannot be used as ordinary identifiers. '
'They must\n'
'be spelled exactly as written here:\n'
'\n'
' False class finally is return\n'
' None continue for lambda try\n'
' True def from nonlocal while\n'
' and del global not with\n'
' as elif if or yield\n'
' assert else import pass\n'
' break except in raise\n'
'\n'
'\n'
'Reserved classes of identifiers\n'
'===============================\n'
'\n'
'Certain classes of identifiers (besides keywords) have '
'special\n'
'meanings. These classes are identified by the patterns of '
'leading and\n'
'trailing underscore characters:\n'
'\n'
'"_*"\n'
' Not imported by "from module import *". The special '
'identifier "_"\n'
' is used in the interactive interpreter to store the result '
'of the\n'
' last evaluation; it is stored in the "builtins" module. '
'When not\n'
' in interactive mode, "_" has no special meaning and is not '
'defined.\n'
' See section The import statement.\n'
'\n'
' Note: The name "_" is often used in conjunction with\n'
' internationalization; refer to the documentation for '
'the\n'
' "gettext" module for more information on this '
'convention.\n'
'\n'
'"__*__"\n'
' System-defined names. These names are defined by the '
'interpreter\n'
' and its implementation (including the standard library). '
'Current\n'
' system names are discussed in the Special method names '
'section and\n'
' elsewhere. More will likely be defined in future versions '
'of\n'
' Python. *Any* use of "__*__" names, in any context, that '
'does not\n'
' follow explicitly documented use, is subject to breakage '
'without\n'
' warning.\n'
'\n'
'"__*"\n'
' Class-private names. Names in this category, when used '
'within the\n'
' context of a class definition, are re-written to use a '
'mangled form\n'
' to help avoid name clashes between “private” attributes of '
'base and\n'
' derived classes. See section Identifiers (Names).\n',
'if': 'The "if" statement\n'
'******************\n'
'\n'
'The "if" statement is used for conditional execution:\n'
'\n'
' if_stmt ::= "if" expression ":" suite\n'
' ( "elif" expression ":" suite )*\n'
' ["else" ":" suite]\n'
'\n'
'It selects exactly one of the suites by evaluating the expressions '
'one\n'
'by one until one is found to be true (see section Boolean operations\n'
'for the definition of true and false); then that suite is executed\n'
'(and no other part of the "if" statement is executed or evaluated).\n'
'If all expressions are false, the suite of the "else" clause, if\n'
'present, is executed.\n',
'imaginary': 'Imaginary literals\n'
'******************\n'
'\n'
'Imaginary literals are described by the following lexical '
'definitions:\n'
'\n'
' imagnumber ::= (floatnumber | digitpart) ("j" | "J")\n'
'\n'
'An imaginary literal yields a complex number with a real part '
'of 0.0.\n'
'Complex numbers are represented as a pair of floating point '
'numbers\n'
'and have the same restrictions on their range. To create a '
'complex\n'
'number with a nonzero real part, add a floating point number to '
'it,\n'
'e.g., "(3+4j)". Some examples of imaginary literals:\n'
'\n'
' 3.14j 10.j 10j .001j 1e100j 3.14e-10j '
'3.14_15_93j\n',
'import': 'The "import" statement\n'
'**********************\n'
'\n'
' import_stmt ::= "import" module ["as" name] ( "," module '
'["as" name] )*\n'
' | "from" relative_module "import" identifier '
'["as" name]\n'
' ( "," identifier ["as" name] )*\n'
' | "from" relative_module "import" "(" '
'identifier ["as" name]\n'
' ( "," identifier ["as" name] )* [","] ")"\n'
' | "from" module "import" "*"\n'
' module ::= (identifier ".")* identifier\n'
' relative_module ::= "."* module | "."+\n'
' name ::= identifier\n'
'\n'
'The basic import statement (no "from" clause) is executed in two\n'
'steps:\n'
'\n'
'1. find a module, loading and initializing it if necessary\n'
'\n'
'2. define a name or names in the local namespace for the scope\n'
' where the "import" statement occurs.\n'
'\n'
'When the statement contains multiple clauses (separated by commas) '
'the\n'
'two steps are carried out separately for each clause, just as '
'though\n'
'the clauses had been separated out into individual import '
'statements.\n'
'\n'
'The details of the first step, finding and loading modules are\n'
'described in greater detail in the section on the import system, '
'which\n'
'also describes the various types of packages and modules that can '
'be\n'
'imported, as well as all the hooks that can be used to customize '
'the\n'
'import system. Note that failures in this step may indicate '
'either\n'
'that the module could not be located, *or* that an error occurred\n'
'while initializing the module, which includes execution of the\n'
'module’s code.\n'
'\n'
'If the requested module is retrieved successfully, it will be '
'made\n'
'available in the local namespace in one of three ways:\n'
'\n'
'* If the module name is followed by "as", then the name following\n'
' "as" is bound directly to the imported module.\n'
'\n'
'* If no other name is specified, and the module being imported is '
'a\n'
' top level module, the module’s name is bound in the local '
'namespace\n'
' as a reference to the imported module\n'
'\n'
'* If the module being imported is *not* a top level module, then '
'the\n'
' name of the top level package that contains the module is bound '
'in\n'
' the local namespace as a reference to the top level package. '
'The\n'
' imported module must be accessed using its full qualified name\n'
' rather than directly\n'
'\n'
'The "from" form uses a slightly more complex process:\n'
'\n'
'1. find the module specified in the "from" clause, loading and\n'
' initializing it if necessary;\n'
'\n'
'2. for each of the identifiers specified in the "import" clauses:\n'
'\n'
' 1. check if the imported module has an attribute by that name\n'
'\n'
' 2. if not, attempt to import a submodule with that name and '
'then\n'
' check the imported module again for that attribute\n'
'\n'
' 3. if the attribute is not found, "ImportError" is raised.\n'
'\n'
' 4. otherwise, a reference to that value is stored in the local\n'
' namespace, using the name in the "as" clause if it is '
'present,\n'
' otherwise using the attribute name\n'
'\n'
'Examples:\n'
'\n'
' import foo # foo imported and bound locally\n'
' import foo.bar.baz # foo.bar.baz imported, foo bound '
'locally\n'
' import foo.bar.baz as fbb # foo.bar.baz imported and bound as '
'fbb\n'
' from foo.bar import baz # foo.bar.baz imported and bound as '
'baz\n'
' from foo import attr # foo imported and foo.attr bound as '
'attr\n'
'\n'
'If the list of identifiers is replaced by a star ("\'*\'"), all '
'public\n'
'names defined in the module are bound in the local namespace for '
'the\n'
'scope where the "import" statement occurs.\n'
'\n'
'The *public names* defined by a module are determined by checking '
'the\n'
'module’s namespace for a variable named "__all__"; if defined, it '
'must\n'
'be a sequence of strings which are names defined or imported by '
'that\n'
'module. The names given in "__all__" are all considered public '
'and\n'
'are required to exist. If "__all__" is not defined, the set of '
'public\n'
'names includes all names found in the module’s namespace which do '
'not\n'
'begin with an underscore character ("\'_\'"). "__all__" should '
'contain\n'
'the entire public API. It is intended to avoid accidentally '
'exporting\n'
'items that are not part of the API (such as library modules which '
'were\n'
'imported and used within the module).\n'
'\n'
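          'For example, a module might declare (a small sketch; the names\n'
          'are hypothetical):\n'
          '\n'
          "   __all__ = ['frobnicate', 'Frobnicator']\n"
          '\n'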
'The wild card form of import — "from module import *" — is only\n'
'allowed at the module level. Attempting to use it in class or\n'
'function definitions will raise a "SyntaxError".\n'
'\n'
'When specifying what module to import you do not have to specify '
'the\n'
'absolute name of the module. When a module or package is '
'contained\n'
'within another package it is possible to make a relative import '
'within\n'
'the same top package without having to mention the package name. '
'By\n'
'using leading dots in the specified module or package after "from" '
'you\n'
'can specify how high to traverse up the current package hierarchy\n'
'without specifying exact names. One leading dot means the current\n'
'package where the module making the import exists. Two dots means '
'up\n'
'one package level. Three dots is up two levels, etc. So if you '
'execute\n'
'"from . import mod" from a module in the "pkg" package then you '
'will\n'
'end up importing "pkg.mod". If you execute "from ..subpkg2 import '
'mod"\n'
'from within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\n'
'specification for relative imports is contained within **PEP '
'328**.\n'
'\n'
'"importlib.import_module()" is provided to support applications '
'that\n'
'determine dynamically the modules to be loaded.\n'
'\n'
'\n'
'Future statements\n'
'=================\n'
'\n'
'A *future statement* is a directive to the compiler that a '
'particular\n'
'module should be compiled using syntax or semantics that will be\n'
'available in a specified future release of Python where the '
'feature\n'
'becomes standard.\n'
'\n'
'The future statement is intended to ease migration to future '
'versions\n'
'of Python that introduce incompatible changes to the language. '
'It\n'
'allows use of the new features on a per-module basis before the\n'
'release in which the feature becomes standard.\n'
'\n'
' future_statement ::= "from" "__future__" "import" feature ["as" '
'name]\n'
' ("," feature ["as" name])*\n'
' | "from" "__future__" "import" "(" feature '
'["as" name]\n'
' ("," feature ["as" name])* [","] ")"\n'
' feature ::= identifier\n'
' name ::= identifier\n'
'\n'
'A future statement must appear near the top of the module. The '
'only\n'
'lines that can appear before a future statement are:\n'
'\n'
'* the module docstring (if any),\n'
'\n'
'* comments,\n'
'\n'
'* blank lines, and\n'
'\n'
'* other future statements.\n'
'\n'
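          'For example, a module prologue that satisfies these rules (a\n'
          'small sketch):\n'
          '\n'
          '   """This module docstring may precede the future statement."""\n'
          '   # Comments and blank lines may appear here as well.\n'
          '\n'
          '   from __future__ import generator_stop\n'
          '\n'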
'The features recognized by Python 3.0 are "absolute_import",\n'
'"division", "generators", "unicode_literals", "print_function",\n'
'"nested_scopes" and "with_statement". They are all redundant '
'because\n'
'they are always enabled, and only kept for backwards '
'compatibility.\n'
'\n'
'A future statement is recognized and treated specially at compile\n'
'time: Changes to the semantics of core constructs are often\n'
'implemented by generating different code. It may even be the '
'case\n'
'that a new feature introduces new incompatible syntax (such as a '
'new\n'
'reserved word), in which case the compiler may need to parse the\n'
'module differently. Such decisions cannot be pushed off until\n'
'runtime.\n'
'\n'
'For any given release, the compiler knows which feature names '
'have\n'
'been defined, and raises a compile-time error if a future '
'statement\n'
'contains a feature not known to it.\n'
'\n'
'The direct runtime semantics are the same as for any import '
'statement:\n'
'there is a standard module "__future__", described later, and it '
'will\n'
'be imported in the usual way at the time the future statement is\n'
'executed.\n'
'\n'
'The interesting runtime semantics depend on the specific feature\n'
'enabled by the future statement.\n'
'\n'
'Note that there is nothing special about the statement:\n'
'\n'
' import __future__ [as name]\n'
'\n'
'That is not a future statement; it’s an ordinary import statement '
'with\n'
'no special semantics or syntax restrictions.\n'
'\n'
'Code compiled by calls to the built-in functions "exec()" and\n'
'"compile()" that occur in a module "M" containing a future '
'statement\n'
'will, by default, use the new syntax or semantics associated with '
'the\n'
'future statement. This can be controlled by optional arguments '
'to\n'
'"compile()" — see the documentation of that function for details.\n'
'\n'
'A future statement typed at an interactive interpreter prompt '
'will\n'
'take effect for the rest of the interpreter session. If an\n'
'interpreter is started with the "-i" option, is passed a script '
'name\n'
'to execute, and the script includes a future statement, it will be '
'in\n'
'effect in the interactive session started after the script is\n'
'executed.\n'
'\n'
'See also:\n'
'\n'
' **PEP 236** - Back to the __future__\n'
' The original proposal for the __future__ mechanism.\n',
'in': 'Membership test operations\n'
'**************************\n'
'\n'
'The operators "in" and "not in" test for membership. "x in s"\n'
'evaluates to "True" if *x* is a member of *s*, and "False" otherwise.\n'
'"x not in s" returns the negation of "x in s". All built-in '
'sequences\n'
      'and set types support this, as do dictionaries, for which "in" '
'tests\n'
'whether the dictionary has a given key. For container types such as\n'
'list, tuple, set, frozenset, dict, or collections.deque, the\n'
'expression "x in y" is equivalent to "any(x is e or x == e for e in\n'
'y)".\n'
'\n'
'For the string and bytes types, "x in y" is "True" if and only if *x*\n'
'is a substring of *y*. An equivalent test is "y.find(x) != -1".\n'
'Empty strings are always considered to be a substring of any other\n'
'string, so """ in "abc"" will return "True".\n'
'\n'
'For user-defined classes which define the "__contains__()" method, "x\n'
'in y" returns "True" if "y.__contains__(x)" returns a true value, and\n'
'"False" otherwise.\n'
'\n'
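      'For example, a small sketch of a class that customizes membership\n'
      'testing (the class name is illustrative):\n'
      '\n'
      '   class Evens:\n'
      '       def __contains__(self, item):\n'
      '           return isinstance(item, int) and item % 2 == 0\n'
      '\n'
      '   4 in Evens()       # True\n'
      '   3 not in Evens()   # True\n'
      '\n'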
'For user-defined classes which do not define "__contains__()" but do\n'
'define "__iter__()", "x in y" is "True" if some value "z" with "x ==\n'
'z" is produced while iterating over "y". If an exception is raised\n'
'during the iteration, it is as if "in" raised that exception.\n'
'\n'
'Lastly, the old-style iteration protocol is tried: if a class defines\n'
'"__getitem__()", "x in y" is "True" if and only if there is a non-\n'
'negative integer index *i* such that "x == y[i]", and all lower\n'
      'integer indices do not raise an "IndexError" exception. (If any other\n'
'exception is raised, it is as if "in" raised that exception).\n'
'\n'
'The operator "not in" is defined to have the inverse true value of\n'
'"in".\n',
'integers': 'Integer literals\n'
'****************\n'
'\n'
'Integer literals are described by the following lexical '
'definitions:\n'
'\n'
' integer ::= decinteger | bininteger | octinteger | '
'hexinteger\n'
' decinteger ::= nonzerodigit (["_"] digit)* | "0"+ (["_"] '
'"0")*\n'
' bininteger ::= "0" ("b" | "B") (["_"] bindigit)+\n'
' octinteger ::= "0" ("o" | "O") (["_"] octdigit)+\n'
' hexinteger ::= "0" ("x" | "X") (["_"] hexdigit)+\n'
' nonzerodigit ::= "1"..."9"\n'
' digit ::= "0"..."9"\n'
' bindigit ::= "0" | "1"\n'
' octdigit ::= "0"..."7"\n'
' hexdigit ::= digit | "a"..."f" | "A"..."F"\n'
'\n'
'There is no limit for the length of integer literals apart from '
'what\n'
'can be stored in available memory.\n'
'\n'
'Underscores are ignored for determining the numeric value of '
'the\n'
'literal. They can be used to group digits for enhanced '
'readability.\n'
'One underscore can occur between digits, and after base '
'specifiers\n'
'like "0x".\n'
'\n'
'Note that leading zeros in a non-zero decimal number are not '
'allowed.\n'
'This is for disambiguation with C-style octal literals, which '
'Python\n'
'used before version 3.0.\n'
'\n'
'Some examples of integer literals:\n'
'\n'
' 7 2147483647 0o177 0b100110111\n'
' 3 79228162514264337593543950336 0o377 0xdeadbeef\n'
' 100_000_000_000 0b_1110_0101\n'
'\n'
'Changed in version 3.6: Underscores are now allowed for '
'grouping\n'
'purposes in literals.\n',
'lambda': 'Lambdas\n'
'*******\n'
'\n'
' lambda_expr ::= "lambda" [parameter_list]: expression\n'
' lambda_expr_nocond ::= "lambda" [parameter_list]: '
'expression_nocond\n'
'\n'
'Lambda expressions (sometimes called lambda forms) are used to '
'create\n'
'anonymous functions. The expression "lambda parameters: '
'expression"\n'
'yields a function object. The unnamed object behaves like a '
'function\n'
'object defined with:\n'
'\n'
' def <lambda>(parameters):\n'
' return expression\n'
'\n'
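          'For example, these two definitions are, for most purposes,\n'
          'interchangeable (a small sketch):\n'
          '\n'
          '   square = lambda x: x * x\n'
          '\n'
          '   def square(x):\n'
          '       return x * x\n'
          '\n'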
'See section Function definitions for the syntax of parameter '
'lists.\n'
'Note that functions created with lambda expressions cannot '
'contain\n'
'statements or annotations.\n',
'lists': 'List displays\n'
'*************\n'
'\n'
'A list display is a possibly empty series of expressions enclosed '
'in\n'
'square brackets:\n'
'\n'
' list_display ::= "[" [starred_list | comprehension] "]"\n'
'\n'
'A list display yields a new list object, the contents being '
'specified\n'
'by either a list of expressions or a comprehension. When a comma-\n'
'separated list of expressions is supplied, its elements are '
'evaluated\n'
'from left to right and placed into the list object in that order.\n'
'When a comprehension is supplied, the list is constructed from the\n'
'elements resulting from the comprehension.\n',
'naming': 'Naming and binding\n'
'******************\n'
'\n'
'\n'
'Binding of names\n'
'================\n'
'\n'
'*Names* refer to objects. Names are introduced by name binding\n'
'operations.\n'
'\n'
'The following constructs bind names: formal parameters to '
'functions,\n'
'"import" statements, class and function definitions (these bind '
'the\n'
'class or function name in the defining block), and targets that '
'are\n'
'identifiers if occurring in an assignment, "for" loop header, or '
'after\n'
'"as" in a "with" statement or "except" clause. The "import" '
'statement\n'
'of the form "from ... import *" binds all names defined in the\n'
'imported module, except those beginning with an underscore. This '
'form\n'
'may only be used at the module level.\n'
'\n'
'A target occurring in a "del" statement is also considered bound '
'for\n'
'this purpose (though the actual semantics are to unbind the '
'name).\n'
'\n'
'Each assignment or import statement occurs within a block defined '
'by a\n'
'class or function definition or at the module level (the '
'top-level\n'
'code block).\n'
'\n'
'If a name is bound in a block, it is a local variable of that '
'block,\n'
'unless declared as "nonlocal" or "global". If a name is bound at '
'the\n'
'module level, it is a global variable. (The variables of the '
'module\n'
'code block are local and global.) If a variable is used in a '
'code\n'
'block but not defined there, it is a *free variable*.\n'
'\n'
'Each occurrence of a name in the program text refers to the '
'*binding*\n'
'of that name established by the following name resolution rules.\n'
'\n'
'\n'
'Resolution of names\n'
'===================\n'
'\n'
'A *scope* defines the visibility of a name within a block. If a '
'local\n'
'variable is defined in a block, its scope includes that block. If '
'the\n'
'definition occurs in a function block, the scope extends to any '
'blocks\n'
'contained within the defining one, unless a contained block '
'introduces\n'
'a different binding for the name.\n'
'\n'
'When a name is used in a code block, it is resolved using the '
'nearest\n'
'enclosing scope. The set of all such scopes visible to a code '
'block\n'
'is called the block’s *environment*.\n'
'\n'
'When a name is not found at all, a "NameError" exception is '
'raised. If\n'
'the current scope is a function scope, and the name refers to a '
'local\n'
'variable that has not yet been bound to a value at the point where '
'the\n'
'name is used, an "UnboundLocalError" exception is raised.\n'
'"UnboundLocalError" is a subclass of "NameError".\n'
'\n'
'If a name binding operation occurs anywhere within a code block, '
'all\n'
'uses of the name within the block are treated as references to '
'the\n'
'current block. This can lead to errors when a name is used within '
'a\n'
'block before it is bound. This rule is subtle. Python lacks\n'
'declarations and allows name binding operations to occur anywhere\n'
'within a code block. The local variables of a code block can be\n'
'determined by scanning the entire text of the block for name '
'binding\n'
'operations.\n'
'\n'
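          'For example, a small sketch of a use-before-binding error:\n'
          '\n'
          '   x = 10\n'
          '\n'
          '   def f():\n'
          '       print(x)   # "x" is local throughout "f" because of\n'
          '       x = 20     # the assignment below\n'
          '\n'
          '   f()   # raises UnboundLocalError\n'
          '\n'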
'If the "global" statement occurs within a block, all uses of the '
'name\n'
'specified in the statement refer to the binding of that name in '
'the\n'
'top-level namespace. Names are resolved in the top-level '
'namespace by\n'
'searching the global namespace, i.e. the namespace of the module\n'
'containing the code block, and the builtins namespace, the '
'namespace\n'
'of the module "builtins". The global namespace is searched '
'first. If\n'
'the name is not found there, the builtins namespace is searched. '
'The\n'
'"global" statement must precede all uses of the name.\n'
'\n'
'The "global" statement has the same scope as a name binding '
'operation\n'
'in the same block. If the nearest enclosing scope for a free '
'variable\n'
'contains a global statement, the free variable is treated as a '
'global.\n'
'\n'
'The "nonlocal" statement causes corresponding names to refer to\n'
'previously bound variables in the nearest enclosing function '
'scope.\n'
'"SyntaxError" is raised at compile time if the given name does '
'not\n'
'exist in any enclosing function scope.\n'
'\n'
'The namespace for a module is automatically created the first time '
'a\n'
'module is imported. The main module for a script is always '
'called\n'
'"__main__".\n'
'\n'
'Class definition blocks and arguments to "exec()" and "eval()" '
'are\n'
'special in the context of name resolution. A class definition is '
'an\n'
'executable statement that may use and define names. These '
'references\n'
'follow the normal rules for name resolution with an exception '
'that\n'
'unbound local variables are looked up in the global namespace. '
'The\n'
'namespace of the class definition becomes the attribute dictionary '
'of\n'
'the class. The scope of names defined in a class block is limited '
'to\n'
'the class block; it does not extend to the code blocks of methods '
'–\n'
'this includes comprehensions and generator expressions since they '
'are\n'
'implemented using a function scope. This means that the '
'following\n'
'will fail:\n'
'\n'
' class A:\n'
' a = 42\n'
' b = list(a + i for i in range(10))\n'
'\n'
'\n'
'Builtins and restricted execution\n'
'=================================\n'
'\n'
'**CPython implementation detail:** Users should not touch\n'
'"__builtins__"; it is strictly an implementation detail. Users\n'
'wanting to override values in the builtins namespace should '
'"import"\n'
'the "builtins" module and modify its attributes appropriately.\n'
'\n'
'The builtins namespace associated with the execution of a code '
'block\n'
'is actually found by looking up the name "__builtins__" in its '
'global\n'
'namespace; this should be a dictionary or a module (in the latter '
'case\n'
'the module’s dictionary is used). By default, when in the '
'"__main__"\n'
'module, "__builtins__" is the built-in module "builtins"; when in '
'any\n'
'other module, "__builtins__" is an alias for the dictionary of '
'the\n'
'"builtins" module itself.\n'
'\n'
'\n'
'Interaction with dynamic features\n'
'=================================\n'
'\n'
'Name resolution of free variables occurs at runtime, not at '
'compile\n'
'time. This means that the following code will print 42:\n'
'\n'
' i = 10\n'
' def f():\n'
' print(i)\n'
' i = 42\n'
' f()\n'
'\n'
'The "eval()" and "exec()" functions do not have access to the '
'full\n'
'environment for resolving names. Names may be resolved in the '
'local\n'
'and global namespaces of the caller. Free variables are not '
'resolved\n'
'in the nearest enclosing namespace, but in the global namespace. '
'[1]\n'
'The "exec()" and "eval()" functions have optional arguments to\n'
'override the global and local namespace. If only one namespace '
'is\n'
'specified, it is used for both.\n',
'nonlocal': 'The "nonlocal" statement\n'
'************************\n'
'\n'
' nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n'
'\n'
'The "nonlocal" statement causes the listed identifiers to refer '
'to\n'
'previously bound variables in the nearest enclosing scope '
'excluding\n'
'globals. This is important because the default behavior for '
'binding is\n'
'to search the local namespace first. The statement allows\n'
'encapsulated code to rebind variables outside of the local '
'scope\n'
'besides the global (module) scope.\n'
'\n'
'Names listed in a "nonlocal" statement, unlike those listed in '
'a\n'
'"global" statement, must refer to pre-existing bindings in an\n'
'enclosing scope (the scope in which a new binding should be '
'created\n'
'cannot be determined unambiguously).\n'
'\n'
'Names listed in a "nonlocal" statement must not collide with '
'pre-\n'
'existing bindings in the local scope.\n'
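'\n'
'A minimal sketch (the function names are hypothetical):\n'
'\n'
'   def make_accumulator():\n'
'       total = 0\n'
'       def add(amount):\n'
'           nonlocal total    # rebind "total" in the enclosing scope\n'
'           total += amount\n'
'           return total\n'
'       return add\n'
'\n'
'   acc = make_accumulator()\n'
'   acc(1)    # 1\n'
'   acc(2)    # 3\n'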
'\n'
'See also:\n'
'\n'
' **PEP 3104** - Access to Names in Outer Scopes\n'
' The specification for the "nonlocal" statement.\n',
'numbers': 'Numeric literals\n'
'****************\n'
'\n'
'There are three types of numeric literals: integers, floating '
'point\n'
'numbers, and imaginary numbers. There are no complex literals\n'
'(complex numbers can be formed by adding a real number and an\n'
'imaginary number).\n'
'\n'
'Note that numeric literals do not include a sign; a phrase like '
'"-1"\n'
'is actually an expression composed of the unary operator ‘"-"‘ '
'and the\n'
'literal "1".\n',
'numeric-types': 'Emulating numeric types\n'
'***********************\n'
'\n'
'The following methods can be defined to emulate numeric '
'objects.\n'
'Methods corresponding to operations that are not supported '
'by the\n'
'particular kind of number implemented (e.g., bitwise '
'operations for\n'
'non-integral numbers) should be left undefined.\n'
'\n'
'object.__add__(self, other)\n'
'object.__sub__(self, other)\n'
'object.__mul__(self, other)\n'
'object.__matmul__(self, other)\n'
'object.__truediv__(self, other)\n'
'object.__floordiv__(self, other)\n'
'object.__mod__(self, other)\n'
'object.__divmod__(self, other)\n'
'object.__pow__(self, other[, modulo])\n'
'object.__lshift__(self, other)\n'
'object.__rshift__(self, other)\n'
'object.__and__(self, other)\n'
'object.__xor__(self, other)\n'
'object.__or__(self, other)\n'
'\n'
' These methods are called to implement the binary '
'arithmetic\n'
' operations ("+", "-", "*", "@", "/", "//", "%", '
'"divmod()",\n'
' "pow()", "**", "<<", ">>", "&", "^", "|"). For '
'instance, to\n'
' evaluate the expression "x + y", where *x* is an '
'instance of a\n'
' class that has an "__add__()" method, "x.__add__(y)" is '
'called.\n'
' The "__divmod__()" method should be the equivalent to '
'using\n'
' "__floordiv__()" and "__mod__()"; it should not be '
'related to\n'
' "__truediv__()". Note that "__pow__()" should be '
'defined to accept\n'
' an optional third argument if the ternary version of the '
'built-in\n'
' "pow()" function is to be supported.\n'
'\n'
' If one of those methods does not support the operation '
'with the\n'
' supplied arguments, it should return "NotImplemented".\n'
'\n'
'object.__radd__(self, other)\n'
'object.__rsub__(self, other)\n'
'object.__rmul__(self, other)\n'
'object.__rmatmul__(self, other)\n'
'object.__rtruediv__(self, other)\n'
'object.__rfloordiv__(self, other)\n'
'object.__rmod__(self, other)\n'
'object.__rdivmod__(self, other)\n'
'object.__rpow__(self, other)\n'
'object.__rlshift__(self, other)\n'
'object.__rrshift__(self, other)\n'
'object.__rand__(self, other)\n'
'object.__rxor__(self, other)\n'
'object.__ror__(self, other)\n'
'\n'
' These methods are called to implement the binary '
'arithmetic\n'
' operations ("+", "-", "*", "@", "/", "//", "%", '
'"divmod()",\n'
' "pow()", "**", "<<", ">>", "&", "^", "|") with reflected '
'(swapped)\n'
' operands. These functions are only called if the left '
'operand does\n'
' not support the corresponding operation [3] and the '
'operands are of\n'
' different types. [4] For instance, to evaluate the '
'expression "x -\n'
' y", where *y* is an instance of a class that has an '
'"__rsub__()"\n'
' method, "y.__rsub__(x)" is called if "x.__sub__(y)" '
'returns\n'
' *NotImplemented*.\n'
'\n'
' Note that ternary "pow()" will not try calling '
'"__rpow__()" (the\n'
' coercion rules would become too complicated).\n'
'\n'
' Note: If the right operand’s type is a subclass of the '
'left\n'
' operand’s type and that subclass provides the '
'reflected method\n'
' for the operation, this method will be called before '
'the left\n'
' operand’s non-reflected method. This behavior allows '
'subclasses\n'
' to override their ancestors’ operations.\n'
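'\n'
'   As an illustrative sketch (the class name is hypothetical),\n'
'   defining "__radd__()" lets instances appear on the right-hand\n'
'   side of "+", which is what "sum()" relies on when it starts from\n'
'   the integer "0":\n'
'\n'
'      class Meters:\n'
'          def __init__(self, value):\n'
'              self.value = value\n'
'\n'
'          def __add__(self, other):\n'
'              if isinstance(other, Meters):\n'
'                  return Meters(self.value + other.value)\n'
'              return NotImplemented\n'
'\n'
'          def __radd__(self, other):\n'
'              if other == 0:                  # 0 + Meters(...)\n'
'                  return Meters(self.value)\n'
'              return NotImplemented\n'
'\n'
'      total = sum([Meters(1), Meters(2)])     # Meters(3)\n'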
'\n'
'object.__iadd__(self, other)\n'
'object.__isub__(self, other)\n'
'object.__imul__(self, other)\n'
'object.__imatmul__(self, other)\n'
'object.__itruediv__(self, other)\n'
'object.__ifloordiv__(self, other)\n'
'object.__imod__(self, other)\n'
'object.__ipow__(self, other[, modulo])\n'
'object.__ilshift__(self, other)\n'
'object.__irshift__(self, other)\n'
'object.__iand__(self, other)\n'
'object.__ixor__(self, other)\n'
'object.__ior__(self, other)\n'
'\n'
' These methods are called to implement the augmented '
'arithmetic\n'
' assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", '
'"**=",\n'
' "<<=", ">>=", "&=", "^=", "|="). These methods should '
'attempt to\n'
' do the operation in-place (modifying *self*) and return '
'the result\n'
' (which could be, but does not have to be, *self*). If a '
'specific\n'
' method is not defined, the augmented assignment falls '
'back to the\n'
' normal methods. For instance, if *x* is an instance of '
'a class\n'
' with an "__iadd__()" method, "x += y" is equivalent to '
'"x =\n'
' x.__iadd__(y)" . Otherwise, "x.__add__(y)" and '
'"y.__radd__(x)" are\n'
' considered, as with the evaluation of "x + y". In '
'certain\n'
' situations, augmented assignment can result in '
'unexpected errors\n'
' (see Why does a_tuple[i] += [‘item’] raise an exception '
'when the\n'
' addition works?), but this behavior is in fact part of '
'the data\n'
' model.\n'
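'\n'
'   A minimal sketch (the class name is hypothetical) of an in-place\n'
'   method that mutates *self* and returns it:\n'
'\n'
'      class Bag:\n'
'          def __init__(self):\n'
'              self.items = []\n'
'\n'
'          def __iadd__(self, other):\n'
'              self.items.extend(other)   # modify in place\n'
'              return self                # and return the result\n'
'\n'
'      b = Bag()\n'
'      b += [1, 2]    # equivalent to b = b.__iadd__([1, 2])\n'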
'\n'
'object.__neg__(self)\n'
'object.__pos__(self)\n'
'object.__abs__(self)\n'
'object.__invert__(self)\n'
'\n'
' Called to implement the unary arithmetic operations '
'("-", "+",\n'
' "abs()" and "~").\n'
'\n'
'object.__complex__(self)\n'
'object.__int__(self)\n'
'object.__float__(self)\n'
'\n'
' Called to implement the built-in functions "complex()", '
'"int()" and\n'
' "float()". Should return a value of the appropriate '
'type.\n'
'\n'
'object.__index__(self)\n'
'\n'
' Called to implement "operator.index()", and whenever '
'Python needs\n'
' to losslessly convert the numeric object to an integer '
'object (such\n'
' as in slicing, or in the built-in "bin()", "hex()" and '
'"oct()"\n'
' functions). Presence of this method indicates that the '
'numeric\n'
' object is an integer type. Must return an integer.\n'
'\n'
' Note: In order to have a coherent integer type class, '
'when\n'
' "__index__()" is defined "__int__()" should also be '
'defined, and\n'
' both should return the same value.\n'
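'\n'
'   For illustration (a hypothetical integer-like wrapper):\n'
'\n'
'      class MyInt:\n'
'          def __init__(self, value):\n'
'              self.value = value\n'
'\n'
'          def __index__(self):\n'
'              return self.value\n'
'\n'
'          __int__ = __index__    # keep int() consistent, as noted\n'
'\n'
"      hex(MyInt(255))           # '0xff'\n"
'      [10, 20, 30][MyInt(1)]    # 20\n'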
'\n'
'object.__round__(self[, ndigits])\n'
'object.__trunc__(self)\n'
'object.__floor__(self)\n'
'object.__ceil__(self)\n'
'\n'
' Called to implement the built-in function "round()" and '
'"math"\n'
' functions "trunc()", "floor()" and "ceil()". Unless '
'*ndigits* is\n'
' passed to "__round__()" all these methods should return '
'the value\n'
' of the object truncated to an "Integral" (typically an '
'"int").\n'
'\n'
' If "__int__()" is not defined then the built-in function '
'"int()"\n'
' falls back to "__trunc__()".\n',
'objects': 'Objects, values and types\n'
'*************************\n'
'\n'
'*Objects* are Python’s abstraction for data. All data in a '
'Python\n'
'program is represented by objects or by relations between '
'objects. (In\n'
'a sense, and in conformance to Von Neumann’s model of a “stored\n'
'program computer,” code is also represented by objects.)\n'
'\n'
'Every object has an identity, a type and a value. An object’s\n'
'*identity* never changes once it has been created; you may think '
'of it\n'
'as the object’s address in memory. The ‘"is"’ operator compares '
'the\n'
'identity of two objects; the "id()" function returns an integer\n'
'representing its identity.\n'
'\n'
'**CPython implementation detail:** For CPython, "id(x)" is the '
'memory\n'
'address where "x" is stored.\n'
'\n'
'An object’s type determines the operations that the object '
'supports\n'
'(e.g., “does it have a length?”) and also defines the possible '
'values\n'
'for objects of that type. The "type()" function returns an '
'object’s\n'
'type (which is an object itself). Like its identity, an '
'object’s\n'
'*type* is also unchangeable. [1]\n'
'\n'
'The *value* of some objects can change. Objects whose value can\n'
'change are said to be *mutable*; objects whose value is '
'unchangeable\n'
'once they are created are called *immutable*. (The value of an\n'
'immutable container object that contains a reference to a '
'mutable\n'
'object can change when the latter’s value is changed; however '
'the\n'
'container is still considered immutable, because the collection '
'of\n'
'objects it contains cannot be changed. So, immutability is not\n'
'strictly the same as having an unchangeable value, it is more '
'subtle.)\n'
'An object’s mutability is determined by its type; for instance,\n'
'numbers, strings and tuples are immutable, while dictionaries '
'and\n'
'lists are mutable.\n'
'\n'
'Objects are never explicitly destroyed; however, when they '
'become\n'
'unreachable they may be garbage-collected. An implementation is\n'
'allowed to postpone garbage collection or omit it altogether — it '
'is a\n'
'matter of implementation quality how garbage collection is\n'
'implemented, as long as no objects are collected that are still\n'
'reachable.\n'
'\n'
'**CPython implementation detail:** CPython currently uses a '
'reference-\n'
'counting scheme with (optional) delayed detection of cyclically '
'linked\n'
'garbage, which collects most objects as soon as they become\n'
'unreachable, but is not guaranteed to collect garbage containing\n'
'circular references. See the documentation of the "gc" module '
'for\n'
'information on controlling the collection of cyclic garbage. '
'Other\n'
'implementations act differently and CPython may change. Do not '
'depend\n'
'on immediate finalization of objects when they become unreachable '
'(so\n'
'you should always close files explicitly).\n'
'\n'
'Note that the use of the implementation’s tracing or debugging\n'
'facilities may keep objects alive that would normally be '
'collectable.\n'
'Also note that catching an exception with a ‘"try"…"except"’ '
'statement\n'
'may keep objects alive.\n'
'\n'
'Some objects contain references to “external” resources such as '
'open\n'
'files or windows. It is understood that these resources are '
'freed\n'
'when the object is garbage-collected, but since garbage '
'collection is\n'
'not guaranteed to happen, such objects also provide an explicit '
'way to\n'
'release the external resource, usually a "close()" method. '
'Programs\n'
'are strongly recommended to explicitly close such objects. The\n'
'‘"try"…"finally"’ statement and the ‘"with"’ statement provide\n'
'convenient ways to do this.\n'
'\n'
'Some objects contain references to other objects; these are '
'called\n'
'*containers*. Examples of containers are tuples, lists and\n'
'dictionaries. The references are part of a container’s value. '
'In\n'
'most cases, when we talk about the value of a container, we imply '
'the\n'
'values, not the identities of the contained objects; however, '
'when we\n'
'talk about the mutability of a container, only the identities of '
'the\n'
'immediately contained objects are implied. So, if an immutable\n'
'container (like a tuple) contains a reference to a mutable '
'object, its\n'
'value changes if that mutable object is changed.\n'
'\n'
'Types affect almost all aspects of object behavior. Even the\n'
'importance of object identity is affected in some sense: for '
'immutable\n'
'types, operations that compute new values may actually return a\n'
'reference to any existing object with the same type and value, '
'while\n'
'for mutable objects this is not allowed. E.g., after "a = 1; b = '
'1",\n'
'"a" and "b" may or may not refer to the same object with the '
'value\n'
'one, depending on the implementation, but after "c = []; d = []", '
'"c"\n'
'and "d" are guaranteed to refer to two different, unique, newly\n'
'created empty lists. (Note that "c = d = []" assigns the same '
'object\n'
'to both "c" and "d".)\n',
'operator-summary': 'Operator precedence\n'
'*******************\n'
'\n'
'The following table summarizes the operator precedence '
'in Python, from\n'
'lowest precedence (least binding) to highest precedence '
'(most\n'
'binding). Operators in the same box have the same '
'precedence. Unless\n'
'the syntax is explicitly given, operators are binary. '
'Operators in\n'
'the same box group left to right (except for '
'exponentiation, which\n'
'groups from right to left).\n'
'\n'
'Note that comparisons, membership tests, and identity '
'tests, all have\n'
'the same precedence and have a left-to-right chaining '
'feature as\n'
'described in the Comparisons section.\n'
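'\n'
'For example, the chained comparison\n'
'\n'
'   1 < 2 < 3\n'
'\n'
'is evaluated as "1 < 2 and 2 < 3", with "2" evaluated only once.\n'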
'\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| Operator | '
'Description |\n'
'+=================================================+=======================================+\n'
'| "lambda" | '
'Lambda expression |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "if" – "else" | '
'Conditional expression |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "or" | '
'Boolean OR |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "and" | '
'Boolean AND |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "not" "x" | '
'Boolean NOT |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "in", "not in", "is", "is not", "<", "<=", ">", | '
'Comparisons, including membership |\n'
'| ">=", "!=", "==" | '
'tests and identity tests |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "|" | '
'Bitwise OR |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "^" | '
'Bitwise XOR |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "&" | '
'Bitwise AND |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "<<", ">>" | '
'Shifts |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "+", "-" | '
'Addition and subtraction |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "*", "@", "/", "//", "%" | '
'Multiplication, matrix |\n'
'| | '
'multiplication, division, floor |\n'
'| | '
'division, remainder [5] |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "+x", "-x", "~x" | '
'Positive, negative, bitwise NOT |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "**" | '
'Exponentiation [6] |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "await" "x" | '
'Await expression |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "x[index]", "x[index:index]", | '
'Subscription, slicing, call, |\n'
'| "x(arguments...)", "x.attribute" | '
'attribute reference |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "(expressions...)", "[expressions...]", "{key: | '
'Binding or tuple display, list |\n'
'| value...}", "{expressions...}" | '
'display, dictionary display, set |\n'
'| | '
'display |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'\n'
'-[ Footnotes ]-\n'
'\n'
'[1] While "abs(x%y) < abs(y)" is true mathematically, '
'for floats\n'
' it may not be true numerically due to roundoff. For '
'example, and\n'
' assuming a platform on which a Python float is an '
'IEEE 754 double-\n'
' precision number, in order that "-1e-100 % 1e100" '
'have the same\n'
' sign as "1e100", the computed result is "-1e-100 + '
'1e100", which\n'
' is numerically exactly equal to "1e100". The '
'function\n'
' "math.fmod()" returns a result whose sign matches '
'the sign of the\n'
' first argument instead, and so returns "-1e-100" in '
'this case.\n'
' Which approach is more appropriate depends on the '
'application.\n'
'\n'
'[2] If x is very close to an exact integer multiple of '
'y, it’s\n'
' possible for "x//y" to be one larger than '
'"(x-x%y)//y" due to\n'
' rounding. In such cases, Python returns the latter '
'result, in\n'
' order to preserve that "divmod(x,y)[0] * y + x % y" '
'be very close\n'
' to "x".\n'
'\n'
'[3] The Unicode standard distinguishes between *code '
'points* (e.g.\n'
' U+0041) and *abstract characters* (e.g. “LATIN '
'CAPITAL LETTER A”).\n'
' While most abstract characters in Unicode are only '
'represented\n'
' using one code point, there is a number of abstract '
'characters\n'
' that can in addition be represented using a sequence '
'of more than\n'
' one code point. For example, the abstract character '
'“LATIN\n'
' CAPITAL LETTER C WITH CEDILLA” can be represented as '
'a single\n'
' *precomposed character* at code position U+00C7, or '
'as a sequence\n'
' of a *base character* at code position U+0043 (LATIN '
'CAPITAL\n'
' LETTER C), followed by a *combining character* at '
'code position\n'
' U+0327 (COMBINING CEDILLA).\n'
'\n'
' The comparison operators on strings compare at the '
'level of\n'
' Unicode code points. This may be counter-intuitive '
'to humans. For\n'
' example, ""\\u00C7" == "\\u0043\\u0327"" is "False", '
'even though both\n'
' strings represent the same abstract character “LATIN '
'CAPITAL\n'
' LETTER C WITH CEDILLA”.\n'
'\n'
' To compare strings at the level of abstract '
'characters (that is,\n'
' in a way intuitive to humans), use '
'"unicodedata.normalize()".\n'
'\n'
'[4] Due to automatic garbage-collection, free lists, and '
'the\n'
' dynamic nature of descriptors, you may notice '
'seemingly unusual\n'
' behaviour in certain uses of the "is" operator, like '
'those\n'
' involving comparisons between instance methods, or '
'constants.\n'
' Check their documentation for more info.\n'
'\n'
'[5] The "%" operator is also used for string formatting; '
'the same\n'
' precedence applies.\n'
'\n'
'[6] The power operator "**" binds less tightly than an '
'arithmetic\n'
' or bitwise unary operator on its right, that is, '
'"2**-1" is "0.5".\n',
'pass': 'The "pass" statement\n'
'********************\n'
'\n'
' pass_stmt ::= "pass"\n'
'\n'
'"pass" is a null operation — when it is executed, nothing happens. '
'It\n'
'is useful as a placeholder when a statement is required '
'syntactically,\n'
'but no code needs to be executed, for example:\n'
'\n'
' def f(arg): pass # a function that does nothing (yet)\n'
'\n'
' class C: pass # a class with no methods (yet)\n',
'power': 'The power operator\n'
'******************\n'
'\n'
'The power operator binds more tightly than unary operators on its\n'
'left; it binds less tightly than unary operators on its right. '
'The\n'
'syntax is:\n'
'\n'
' power ::= ( await_expr | primary ) ["**" u_expr]\n'
'\n'
'Thus, in an unparenthesized sequence of power and unary operators, '
'the\n'
'operators are evaluated from right to left (this does not '
'constrain\n'
'the evaluation order for the operands): "-1**2" results in "-1".\n'
'\n'
'The power operator has the same semantics as the built-in "pow()"\n'
'function, when called with two arguments: it yields its left '
'argument\n'
'raised to the power of its right argument. The numeric arguments '
'are\n'
'first converted to a common type, and the result is of that type.\n'
'\n'
'For int operands, the result has the same type as the operands '
'unless\n'
'the second argument is negative; in that case, all arguments are\n'
'converted to float and a float result is delivered. For example,\n'
'"10**2" returns "100", but "10**-2" returns "0.01".\n'
'\n'
'Raising "0.0" to a negative power results in a '
'"ZeroDivisionError".\n'
'Raising a negative number to a fractional power results in a '
'"complex"\n'
'number. (In earlier versions it raised a "ValueError".)\n',
'raise': 'The "raise" statement\n'
'*********************\n'
'\n'
' raise_stmt ::= "raise" [expression ["from" expression]]\n'
'\n'
'If no expressions are present, "raise" re-raises the last '
'exception\n'
'that was active in the current scope. If no exception is active '
'in\n'
'the current scope, a "RuntimeError" exception is raised indicating\n'
'that this is an error.\n'
'\n'
'Otherwise, "raise" evaluates the first expression as the exception\n'
'object. It must be either a subclass or an instance of\n'
'"BaseException". If it is a class, the exception instance will be\n'
'obtained when needed by instantiating the class with no arguments.\n'
'\n'
'The *type* of the exception is the exception instance’s class, the\n'
'*value* is the instance itself.\n'
'\n'
'A traceback object is normally created automatically when an '
'exception\n'
'is raised and attached to it as the "__traceback__" attribute, '
'which\n'
'is writable. You can create an exception and set your own traceback '
'in\n'
'one step using the "with_traceback()" exception method (which '
'returns\n'
'the same exception instance, with its traceback set to its '
'argument),\n'
'like so:\n'
'\n'
' raise Exception("foo occurred").with_traceback(tracebackobj)\n'
'\n'
'The "from" clause is used for exception chaining: if given, the '
'second\n'
'*expression* must be another exception class or instance, which '
'will\n'
'then be attached to the raised exception as the "__cause__" '
'attribute\n'
'(which is writable). If the raised exception is not handled, both\n'
'exceptions will be printed:\n'
'\n'
' >>> try:\n'
' ... print(1 / 0)\n'
' ... except Exception as exc:\n'
' ... raise RuntimeError("Something bad happened") from exc\n'
' ...\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 2, in <module>\n'
' ZeroDivisionError: division by zero\n'
'\n'
' The above exception was the direct cause of the following '
'exception:\n'
'\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 4, in <module>\n'
' RuntimeError: Something bad happened\n'
'\n'
'A similar mechanism works implicitly if an exception is raised '
'inside\n'
'an exception handler or a "finally" clause: the previous exception '
'is\n'
'then attached as the new exception’s "__context__" attribute:\n'
'\n'
' >>> try:\n'
' ... print(1 / 0)\n'
' ... except:\n'
' ... raise RuntimeError("Something bad happened")\n'
' ...\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 2, in <module>\n'
' ZeroDivisionError: division by zero\n'
'\n'
' During handling of the above exception, another exception '
'occurred:\n'
'\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 4, in <module>\n'
' RuntimeError: Something bad happened\n'
'\n'
'Exception chaining can be explicitly suppressed by specifying '
'"None"\n'
'in the "from" clause:\n'
'\n'
' >>> try:\n'
' ... print(1 / 0)\n'
' ... except:\n'
' ... raise RuntimeError("Something bad happened") from None\n'
' ...\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 4, in <module>\n'
' RuntimeError: Something bad happened\n'
'\n'
'Additional information on exceptions can be found in section\n'
'Exceptions, and information about handling exceptions is in '
'section\n'
'The try statement.\n'
'\n'
'Changed in version 3.3: "None" is now permitted as "Y" in "raise X\n'
'from Y".\n'
'\n'
'New in version 3.3: The "__suppress_context__" attribute to '
'suppress\n'
'automatic display of the exception context.\n',
'return': 'The "return" statement\n'
'**********************\n'
'\n'
' return_stmt ::= "return" [expression_list]\n'
'\n'
'"return" may only occur syntactically nested in a function '
'definition,\n'
'not within a nested class definition.\n'
'\n'
'If an expression list is present, it is evaluated, else "None" is\n'
'substituted.\n'
'\n'
'"return" leaves the current function call with the expression list '
'(or\n'
'"None") as return value.\n'
'\n'
'When "return" passes control out of a "try" statement with a '
'"finally"\n'
'clause, that "finally" clause is executed before really leaving '
'the\n'
'function.\n'
'\n'
'In a generator function, the "return" statement indicates that '
'the\n'
'generator is done and will cause "StopIteration" to be raised. '
'The\n'
'returned value (if any) is used as an argument to construct\n'
'"StopIteration" and becomes the "StopIteration.value" attribute.\n'
'\n'
'In an asynchronous generator function, an empty "return" '
'statement\n'
'indicates that the asynchronous generator is done and will cause\n'
'"StopAsyncIteration" to be raised. A non-empty "return" statement '
'is\n'
'a syntax error in an asynchronous generator function.\n',
'sequence-types': 'Emulating container types\n'
'*************************\n'
'\n'
'The following methods can be defined to implement '
'container objects.\n'
'Containers usually are sequences (such as lists or tuples) '
'or mappings\n'
'(like dictionaries), but can represent other containers as '
'well. The\n'
'first set of methods is used either to emulate a sequence '
'or to\n'
'emulate a mapping; the difference is that for a sequence, '
'the\n'
'allowable keys should be the integers *k* for which "0 <= '
'k < N" where\n'
'*N* is the length of the sequence, or slice objects, which '
'define a\n'
'range of items. It is also recommended that mappings '
'provide the\n'
'methods "keys()", "values()", "items()", "get()", '
'"clear()",\n'
'"setdefault()", "pop()", "popitem()", "copy()", and '
'"update()"\n'
'behaving similar to those for Python’s standard dictionary '
'objects.\n'
'The "collections" module provides a "MutableMapping" '
'abstract base\n'
'class to help create those methods from a base set of '
'"__getitem__()",\n'
'"__setitem__()", "__delitem__()", and "keys()". Mutable '
'sequences\n'
'should provide methods "append()", "count()", "index()", '
'"extend()",\n'
'"insert()", "pop()", "remove()", "reverse()" and "sort()", '
'like Python\n'
'standard list objects. Finally, sequence types should '
'implement\n'
'addition (meaning concatenation) and multiplication '
'(meaning\n'
'repetition) by defining the methods "__add__()", '
'"__radd__()",\n'
'"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" '
'described\n'
'below; they should not define other numerical operators. '
'It is\n'
'recommended that both mappings and sequences implement '
'the\n'
'"__contains__()" method to allow efficient use of the "in" '
'operator;\n'
'for mappings, "in" should search the mapping’s keys; for '
'sequences, it\n'
'should search through the values. It is further '
'recommended that both\n'
'mappings and sequences implement the "__iter__()" method '
'to allow\n'
'efficient iteration through the container; for mappings, '
'"__iter__()"\n'
'should be the same as "keys()"; for sequences, it should '
'iterate\n'
'through the values.\n'
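'\n'
'A minimal mapping-like sketch (the class name and storage strategy\n'
'are hypothetical):\n'
'\n'
'   class LowerDict:\n'
'       """A mapping that normalizes string keys to lower case."""\n'
'\n'
'       def __init__(self):\n'
'           self._data = {}\n'
'\n'
'       def __len__(self):\n'
'           return len(self._data)\n'
'\n'
'       def __getitem__(self, key):\n'
'           return self._data[key.lower()]    # KeyError if missing\n'
'\n'
'       def __setitem__(self, key, value):\n'
'           self._data[key.lower()] = value\n'
'\n'
'       def __delitem__(self, key):\n'
'           del self._data[key.lower()]\n'
'\n'
'       def __iter__(self):\n'
'           return iter(self._data)           # iterate over keys\n'
'\n'
'       def __contains__(self, key):\n'
'           return key.lower() in self._data\n'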
'\n'
'object.__len__(self)\n'
'\n'
' Called to implement the built-in function "len()". '
'Should return\n'
' the length of the object, an integer ">=" 0. Also, an '
'object that\n'
' doesn’t define a "__bool__()" method and whose '
'"__len__()" method\n'
' returns zero is considered to be false in a Boolean '
'context.\n'
'\n'
' **CPython implementation detail:** In CPython, the '
'length is\n'
' required to be at most "sys.maxsize". If the length is '
'larger than\n'
' "sys.maxsize" some features (such as "len()") may '
'raise\n'
' "OverflowError". To prevent raising "OverflowError" by '
'truth value\n'
' testing, an object must define a "__bool__()" method.\n'
'\n'
'object.__length_hint__(self)\n'
'\n'
' Called to implement "operator.length_hint()". Should '
'return an\n'
' estimated length for the object (which may be greater '
'or less than\n'
' the actual length). The length must be an integer ">=" '
'0. This\n'
' method is purely an optimization and is never required '
'for\n'
' correctness.\n'
'\n'
' New in version 3.4.\n'
'\n'
'Note: Slicing is done exclusively with the following three '
'methods.\n'
' A call like\n'
'\n'
' a[1:2] = b\n'
'\n'
' is translated to\n'
'\n'
' a[slice(1, 2, None)] = b\n'
'\n'
' and so forth. Missing slice items are always filled in '
'with "None".\n'
'\n'
'object.__getitem__(self, key)\n'
'\n'
' Called to implement evaluation of "self[key]". For '
'sequence types,\n'
' the accepted keys should be integers and slice '
'objects. Note that\n'
' the special interpretation of negative indexes (if the '
'class wishes\n'
' to emulate a sequence type) is up to the '
'"__getitem__()" method. If\n'
' *key* is of an inappropriate type, "TypeError" may be '
'raised; if of\n'
' a value outside the set of indexes for the sequence '
'(after any\n'
' special interpretation of negative values), '
'"IndexError" should be\n'
' raised. For mapping types, if *key* is missing (not in '
'the\n'
' container), "KeyError" should be raised.\n'
'\n'
' Note: "for" loops expect that an "IndexError" will be '
'raised for\n'
' illegal indexes to allow proper detection of the end '
'of the\n'
' sequence.\n'
'\n'
'object.__missing__(self, key)\n'
'\n'
' Called by "dict"."__getitem__()" to implement '
'"self[key]" for dict\n'
' subclasses when key is not in the dictionary.\n'
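'\n'
'   For illustration (a hypothetical "dict" subclass):\n'
'\n'
'      class ZeroDict(dict):\n'
'          def __missing__(self, key):\n'
'              return 0\n'
'\n'
"      ZeroDict()['absent']    # 0\n"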
'\n'
'object.__setitem__(self, key, value)\n'
'\n'
' Called to implement assignment to "self[key]". Same '
'note as for\n'
' "__getitem__()". This should only be implemented for '
'mappings if\n'
' the objects support changes to the values for keys, or '
'if new keys\n'
' can be added, or for sequences if elements can be '
'replaced. The\n'
' same exceptions should be raised for improper *key* '
'values as for\n'
' the "__getitem__()" method.\n'
'\n'
'object.__delitem__(self, key)\n'
'\n'
' Called to implement deletion of "self[key]". Same note '
'as for\n'
' "__getitem__()". This should only be implemented for '
'mappings if\n'
' the objects support removal of keys, or for sequences '
'if elements\n'
' can be removed from the sequence. The same exceptions '
'should be\n'
' raised for improper *key* values as for the '
'"__getitem__()" method.\n'
'\n'
'object.__iter__(self)\n'
'\n'
' This method is called when an iterator is required for '
'a container.\n'
' This method should return a new iterator object that '
'can iterate\n'
' over all the objects in the container. For mappings, '
'it should\n'
' iterate over the keys of the container.\n'
'\n'
' Iterator objects also need to implement this method; '
'they are\n'
' required to return themselves. For more information on '
'iterator\n'
' objects, see Iterator Types.\n'
'\n'
'object.__reversed__(self)\n'
'\n'
' Called (if present) by the "reversed()" built-in to '
'implement\n'
' reverse iteration. It should return a new iterator '
'object that\n'
' iterates over all the objects in the container in '
'reverse order.\n'
'\n'
' If the "__reversed__()" method is not provided, the '
'"reversed()"\n'
' built-in will fall back to using the sequence protocol '
'("__len__()"\n'
' and "__getitem__()"). Objects that support the '
'sequence protocol\n'
' should only provide "__reversed__()" if they can '
'provide an\n'
' implementation that is more efficient than the one '
'provided by\n'
' "reversed()".\n'
'\n'
'The membership test operators ("in" and "not in") are '
'normally\n'
'implemented as an iteration through a sequence. However, '
'container\n'
'objects can supply the following special method with a '
'more efficient\n'
'implementation, which also does not require the object be '
'a sequence.\n'
'\n'
'object.__contains__(self, item)\n'
'\n'
' Called to implement membership test operators. Should '
'return true\n'
' if *item* is in *self*, false otherwise. For mapping '
'objects, this\n'
' should consider the keys of the mapping rather than the '
'values or\n'
' the key-item pairs.\n'
'\n'
' For objects that don’t define "__contains__()", the '
'membership test\n'
' first tries iteration via "__iter__()", then the old '
'sequence\n'
' iteration protocol via "__getitem__()", see this '
'section in the\n'
' language reference.\n',
'shifting': 'Shifting operations\n'
'*******************\n'
'\n'
'The shifting operations have lower priority than the arithmetic\n'
'operations:\n'
'\n'
' shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n'
'\n'
'These operators accept integers as arguments. They shift the '
'first\n'
'argument to the left or right by the number of bits given by '
'the\n'
'second argument.\n'
'\n'
'A right shift by *n* bits is defined as floor division by '
'"pow(2,n)".\n'
'A left shift by *n* bits is defined as multiplication with '
'"pow(2,n)".\n'
'\n'
'Note: In the current implementation, the right-hand operand is\n'
' required to be at most "sys.maxsize". If the right-hand '
'operand is\n'
' larger than "sys.maxsize" an "OverflowError" exception is '
'raised.\n',
'slicings': 'Slicings\n'
'********\n'
'\n'
'A slicing selects a range of items in a sequence object (e.g., '
'a\n'
'string, tuple or list). Slicings may be used as expressions or '
'as\n'
'targets in assignment or "del" statements. The syntax for a '
'slicing:\n'
'\n'
' slicing ::= primary "[" slice_list "]"\n'
' slice_list ::= slice_item ("," slice_item)* [","]\n'
' slice_item ::= expression | proper_slice\n'
' proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" '
'[stride] ]\n'
' lower_bound ::= expression\n'
' upper_bound ::= expression\n'
' stride ::= expression\n'
'\n'
'There is ambiguity in the formal syntax here: anything that '
'looks like\n'
'an expression list also looks like a slice list, so any '
'subscription\n'
'can be interpreted as a slicing. Rather than further '
'complicating the\n'
'syntax, this is disambiguated by defining that in this case the\n'
'interpretation as a subscription takes priority over the\n'
'interpretation as a slicing (this is the case if the slice list\n'
'contains no proper slice).\n'
'\n'
'The semantics for a slicing are as follows. The primary is '
'indexed\n'
'(using the same "__getitem__()" method as normal subscription) '
'with a\n'
'key that is constructed from the slice list, as follows. If the '
'slice\n'
'list contains at least one comma, the key is a tuple containing '
'the\n'
'conversion of the slice items; otherwise, the conversion of the '
'lone\n'
'slice item is the key. The conversion of a slice item that is '
'an\n'
'expression is that expression. The conversion of a proper slice '
'is a\n'
'slice object (see section The standard type hierarchy) whose '
'"start",\n'
'"stop" and "step" attributes are the values of the expressions '
'given\n'
'as lower bound, upper bound and stride, respectively, '
'substituting\n'
'"None" for missing expressions.\n',
'specialattrs': 'Special Attributes\n'
'******************\n'
'\n'
'The implementation adds a few special read-only attributes '
'to several\n'
'object types, where they are relevant. Some of these are '
'not reported\n'
'by the "dir()" built-in function.\n'
'\n'
'object.__dict__\n'
'\n'
' A dictionary or other mapping object used to store an '
'object’s\n'
' (writable) attributes.\n'
'\n'
'instance.__class__\n'
'\n'
' The class to which a class instance belongs.\n'
'\n'
'class.__bases__\n'
'\n'
' The tuple of base classes of a class object.\n'
'\n'
'definition.__name__\n'
'\n'
' The name of the class, function, method, descriptor, or '
'generator\n'
' instance.\n'
'\n'
'definition.__qualname__\n'
'\n'
' The *qualified name* of the class, function, method, '
'descriptor, or\n'
' generator instance.\n'
'\n'
' New in version 3.3.\n'
'\n'
'class.__mro__\n'
'\n'
' This attribute is a tuple of classes that are considered '
'when\n'
' looking for base classes during method resolution.\n'
'\n'
'class.mro()\n'
'\n'
' This method can be overridden by a metaclass to customize '
'the\n'
' method resolution order for its instances. It is called '
'at class\n'
' instantiation, and its result is stored in "__mro__".\n'
'\n'
'class.__subclasses__()\n'
'\n'
' Each class keeps a list of weak references to its '
'immediate\n'
' subclasses. This method returns a list of all those '
'references\n'
' still alive. Example:\n'
'\n'
' >>> int.__subclasses__()\n'
" [<class 'bool'>]\n"
'\n'
'-[ Footnotes ]-\n'
'\n'
'[1] Additional information on these special methods may be '
'found\n'
' in the Python Reference Manual (Basic customization).\n'
'\n'
'[2] As a consequence, the list "[1, 2]" is considered equal '
'to\n'
' "[1.0, 2.0]", and similarly for tuples.\n'
'\n'
'[3] They must have since the parser can’t tell the type of '
'the\n'
' operands.\n'
'\n'
'[4] Cased characters are those with general category '
'property\n'
' being one of “Lu” (Letter, uppercase), “Ll” (Letter, '
'lowercase),\n'
' or “Lt” (Letter, titlecase).\n'
'\n'
'[5] To format only a tuple you should therefore provide a\n'
' singleton tuple whose only element is the tuple to be '
'formatted.\n',
'specialnames': 'Special method names\n'
'********************\n'
'\n'
'A class can implement certain operations that are invoked by '
'special\n'
'syntax (such as arithmetic operations or subscripting and '
'slicing) by\n'
'defining methods with special names. This is Python’s '
'approach to\n'
'*operator overloading*, allowing classes to define their own '
'behavior\n'
'with respect to language operators. For instance, if a '
'class defines\n'
'a method named "__getitem__()", and "x" is an instance of '
'this class,\n'
'then "x[i]" is roughly equivalent to "type(x).__getitem__(x, '
'i)".\n'
'Except where mentioned, attempts to execute an operation '
'raise an\n'
'exception when no appropriate method is defined (typically\n'
'"AttributeError" or "TypeError").\n'
'\n'
'Setting a special method to "None" indicates that the '
'corresponding\n'
'operation is not available. For example, if a class sets '
'"__iter__()"\n'
'to "None", the class is not iterable, so calling "iter()" on '
'its\n'
'instances will raise a "TypeError" (without falling back to\n'
'"__getitem__()"). [2]\n'
'\n'
'When implementing a class that emulates any built-in type, '
'it is\n'
'important that the emulation only be implemented to the '
'degree that it\n'
'makes sense for the object being modelled. For example, '
'some\n'
'sequences may work well with retrieval of individual '
'elements, but\n'
'extracting a slice may not make sense. (One example of this '
'is the\n'
'"NodeList" interface in the W3C’s Document Object Model.)\n'
'\n'
'\n'
'Basic customization\n'
'===================\n'
'\n'
'object.__new__(cls[, ...])\n'
'\n'
' Called to create a new instance of class *cls*. '
'"__new__()" is a\n'
' static method (special-cased so you need not declare it '
'as such)\n'
' that takes the class of which an instance was requested '
'as its\n'
' first argument. The remaining arguments are those passed '
'to the\n'
' object constructor expression (the call to the class). '
'The return\n'
' value of "__new__()" should be the new object instance '
'(usually an\n'
' instance of *cls*).\n'
'\n'
' Typical implementations create a new instance of the '
'class by\n'
' invoking the superclass’s "__new__()" method using\n'
' "super().__new__(cls[, ...])" with appropriate arguments '
'and then\n'
' modifying the newly-created instance as necessary before '
'returning\n'
' it.\n'
'\n'
' If "__new__()" returns an instance of *cls*, then the '
'new\n'
' instance’s "__init__()" method will be invoked like\n'
' "__init__(self[, ...])", where *self* is the new instance '
'and the\n'
' remaining arguments are the same as were passed to '
'"__new__()".\n'
'\n'
' If "__new__()" does not return an instance of *cls*, then '
'the new\n'
' instance’s "__init__()" method will not be invoked.\n'
'\n'
' "__new__()" is intended mainly to allow subclasses of '
'immutable\n'
' types (like int, str, or tuple) to customize instance '
'creation. It\n'
' is also commonly overridden in custom metaclasses in '
'order to\n'
' customize class creation.\n'
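'\n'
'   A minimal sketch (the class name is hypothetical) of an immutable\n'
'   subclass customizing creation:\n'
'\n'
'      class UpperStr(str):\n'
'          def __new__(cls, value):\n'
'              return super().__new__(cls, value.upper())\n'
'\n'
"      UpperStr('hello')    # 'HELLO'\n"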
'\n'
'object.__init__(self[, ...])\n'
'\n'
' Called after the instance has been created (by '
'"__new__()"), but\n'
' before it is returned to the caller. The arguments are '
'those\n'
' passed to the class constructor expression. If a base '
'class has an\n'
' "__init__()" method, the derived class’s "__init__()" '
'method, if\n'
' any, must explicitly call it to ensure proper '
'initialization of the\n'
' base class part of the instance; for example:\n'
' "super().__init__([args...])".\n'
'\n'
' Because "__new__()" and "__init__()" work together in '
'constructing\n'
' objects ("__new__()" to create it, and "__init__()" to '
'customize\n'
' it), no non-"None" value may be returned by "__init__()"; '
'doing so\n'
' will cause a "TypeError" to be raised at runtime.\n'
'\n'
'object.__del__(self)\n'
'\n'
' Called when the instance is about to be destroyed. This '
'is also\n'
' called a finalizer or (improperly) a destructor. If a '
'base class\n'
' has a "__del__()" method, the derived class’s "__del__()" '
'method,\n'
' if any, must explicitly call it to ensure proper deletion '
'of the\n'
' base class part of the instance.\n'
'\n'
' It is possible (though not recommended!) for the '
'"__del__()" method\n'
' to postpone destruction of the instance by creating a new '
'reference\n'
' to it. This is called object *resurrection*. It is\n'
' implementation-dependent whether "__del__()" is called a '
'second\n'
' time when a resurrected object is about to be destroyed; '
'the\n'
' current *CPython* implementation only calls it once.\n'
'\n'
' It is not guaranteed that "__del__()" methods are called '
'for\n'
' objects that still exist when the interpreter exits.\n'
'\n'
' Note: "del x" doesn’t directly call "x.__del__()" — the '
'former\n'
' decrements the reference count for "x" by one, and the '
'latter is\n'
' only called when "x"’s reference count reaches zero.\n'
'\n'
' **CPython implementation detail:** It is possible for a '
'reference\n'
' cycle to prevent the reference count of an object from '
'going to\n'
' zero. In this case, the cycle will be later detected and '
'deleted\n'
' by the *cyclic garbage collector*. A common cause of '
'reference\n'
' cycles is when an exception has been caught in a local '
'variable.\n'
' The frame’s locals then reference the exception, which '
'references\n'
' its own traceback, which references the locals of all '
'frames caught\n'
' in the traceback.\n'
'\n'
' See also: Documentation for the "gc" module.\n'
'\n'
' Warning: Due to the precarious circumstances under which\n'
' "__del__()" methods are invoked, exceptions that occur '
'during\n'
' their execution are ignored, and a warning is printed '
'to\n'
' "sys.stderr" instead. In particular:\n'
'\n'
' * "__del__()" can be invoked when arbitrary code is '
'being\n'
' executed, including from any arbitrary thread. If '
'"__del__()"\n'
' needs to take a lock or invoke any other blocking '
'resource, it\n'
' may deadlock as the resource may already be taken by '
'the code\n'
' that gets interrupted to execute "__del__()".\n'
'\n'
' * "__del__()" can be executed during interpreter '
'shutdown. As\n'
' a consequence, the global variables it needs to '
'access\n'
' (including other modules) may already have been '
'deleted or set\n'
' to "None". Python guarantees that globals whose name '
'begins\n'
' with a single underscore are deleted from their '
'module before\n'
' other globals are deleted; if no other references to '
'such\n'
' globals exist, this may help in assuring that '
'imported modules\n'
' are still available at the time when the "__del__()" '
'method is\n'
' called.\n'
'\n'
'object.__repr__(self)\n'
'\n'
' Called by the "repr()" built-in function to compute the '
'“official”\n'
' string representation of an object. If at all possible, '
'this\n'
' should look like a valid Python expression that could be '
'used to\n'
' recreate an object with the same value (given an '
'appropriate\n'
' environment). If this is not possible, a string of the '
'form\n'
' "<...some useful description...>" should be returned. The '
'return\n'
' value must be a string object. If a class defines '
'"__repr__()" but\n'
' not "__str__()", then "__repr__()" is also used when an '
'“informal”\n'
' string representation of instances of that class is '
'required.\n'
'\n'
' This is typically used for debugging, so it is important '
'that the\n'
' representation is information-rich and unambiguous.\n'
'\n'
'object.__str__(self)\n'
'\n'
' Called by "str(object)" and the built-in functions '
'"format()" and\n'
' "print()" to compute the “informal” or nicely printable '
'string\n'
' representation of an object. The return value must be a '
'string\n'
' object.\n'
'\n'
' This method differs from "object.__repr__()" in that '
'there is no\n'
' expectation that "__str__()" return a valid Python '
'expression: a\n'
' more convenient or concise representation can be used.\n'
'\n'
' The default implementation defined by the built-in type '
'"object"\n'
' calls "object.__repr__()".\n'
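'\n'
'   An illustrative sketch (the class is hypothetical):\n'
'\n'
'      class Point:\n'
'          def __init__(self, x, y):\n'
'              self.x, self.y = x, y\n'
'\n'
'          def __repr__(self):\n'
"              return f'Point({self.x!r}, {self.y!r})'   # unambiguous\n"
'\n'
'          def __str__(self):\n'
"              return f'({self.x}, {self.y})'            # readable\n"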
'\n'
'object.__bytes__(self)\n'
'\n'
' Called by bytes to compute a byte-string representation '
'of an\n'
' object. This should return a "bytes" object.\n'
'\n'
'object.__format__(self, format_spec)\n'
'\n'
' Called by the "format()" built-in function, and by '
'extension,\n'
' evaluation of formatted string literals and the '
'"str.format()"\n'
' method, to produce a “formatted” string representation of '
'an\n'
' object. The "format_spec" argument is a string that '
'contains a\n'
' description of the formatting options desired. The '
'interpretation\n'
' of the "format_spec" argument is up to the type '
'implementing\n'
' "__format__()", however most classes will either '
'delegate\n'
' formatting to one of the built-in types, or use a '
'similar\n'
' formatting option syntax.\n'
'\n'
' See Format Specification Mini-Language for a description '
'of the\n'
' standard formatting syntax.\n'
'\n'
' The return value must be a string object.\n'
'\n'
' Changed in version 3.4: The __format__ method of "object" '
'itself\n'
' raises a "TypeError" if passed any non-empty string.\n'
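'\n'
'   An illustrative sketch (the class and the format code "f" are\n'
'   hypothetical):\n'
'\n'
'      class Temperature:\n'
'          def __init__(self, celsius):\n'
'              self.celsius = celsius\n'
'\n'
'          def __format__(self, format_spec):\n'
"              if format_spec == 'f':\n"
'                  return f"{self.celsius * 9 / 5 + 32:.1f}F"\n'
'              return format(self.celsius, format_spec)  # delegate\n'
'\n'
"      format(Temperature(20), 'f')    # '68.0F'\n"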
'\n'
'object.__lt__(self, other)\n'
'object.__le__(self, other)\n'
'object.__eq__(self, other)\n'
'object.__ne__(self, other)\n'
'object.__gt__(self, other)\n'
'object.__ge__(self, other)\n'
'\n'
' These are the so-called “rich comparison” methods. The\n'
' correspondence between operator symbols and method names '
'is as\n'
' follows: "x<y" calls "x.__lt__(y)", "x<=y" calls '
'"x.__le__(y)",\n'
' "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", '
'"x>y" calls\n'
' "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n'
'\n'
' A rich comparison method may return the singleton '
'"NotImplemented"\n'
' if it does not implement the operation for a given pair '
'of\n'
' arguments. By convention, "False" and "True" are returned '
'for a\n'
' successful comparison. However, these methods can return '
'any value,\n'
' so if the comparison operator is used in a Boolean '
'context (e.g.,\n'
' in the condition of an "if" statement), Python will call '
'"bool()"\n'
' on the value to determine if the result is true or '
'false.\n'
'\n'
' By default, "__ne__()" delegates to "__eq__()" and '
'inverts the\n'
' result unless it is "NotImplemented". There are no other '
'implied\n'
' relationships among the comparison operators, for '
'example, the\n'
' truth of "(x<y or x==y)" does not imply "x<=y". To '
'automatically\n'
' generate ordering operations from a single root '
'operation, see\n'
' "functools.total_ordering()".\n'
'\n'
' See the paragraph on "__hash__()" for some important '
'notes on\n'
' creating *hashable* objects which support custom '
'comparison\n'
' operations and are usable as dictionary keys.\n'
'\n'
' There are no swapped-argument versions of these methods '
'(to be used\n'
' when the left argument does not support the operation but '
'the right\n'
' argument does); rather, "__lt__()" and "__gt__()" are '
'each other’s\n'
' reflection, "__le__()" and "__ge__()" are each other’s '
'reflection,\n'
' and "__eq__()" and "__ne__()" are their own reflection. '
'If the\n'
' operands are of different types, and right operand’s type '
'is a\n'
' direct or indirect subclass of the left operand’s type, '
'the\n'
' reflected method of the right operand has priority, '
'otherwise the\n'
' left operand’s method has priority. Virtual subclassing '
'is not\n'
' considered.\n'
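'\n'
'   A common pattern (sketch; the class name is hypothetical) is to\n'
'   define "__eq__()" and one ordering method and derive the rest\n'
'   with "functools.total_ordering()":\n'
'\n'
'      from functools import total_ordering\n'
'\n'
'      @total_ordering\n'
'      class Version:\n'
'          def __init__(self, parts):\n'
'              self.parts = tuple(parts)\n'
'\n'
'          def __eq__(self, other):\n'
'              if not isinstance(other, Version):\n'
'                  return NotImplemented\n'
'              return self.parts == other.parts\n'
'\n'
'          def __lt__(self, other):\n'
'              if not isinstance(other, Version):\n'
'                  return NotImplemented\n'
'              return self.parts < other.parts\n'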
'\n'
'object.__hash__(self)\n'
'\n'
' Called by built-in function "hash()" and for operations '
'on members\n'
' of hashed collections including "set", "frozenset", and '
'"dict".\n'
' "__hash__()" should return an integer. The only required '
'property\n'
' is that objects which compare equal have the same hash '
'value; it is\n'
' advised to mix together the hash values of the components '
'of the\n'
' object that also play a part in comparison of objects by '
'packing\n'
' them into a tuple and hashing the tuple. Example:\n'
'\n'
' def __hash__(self):\n'
' return hash((self.name, self.nick, self.color))\n'
'\n'
' Note: "hash()" truncates the value returned from an '
'object’s\n'
' custom "__hash__()" method to the size of a '
'"Py_ssize_t". This\n'
' is typically 8 bytes on 64-bit builds and 4 bytes on '
'32-bit\n'
' builds. If an object’s "__hash__()" must interoperate '
'on builds\n'
' of different bit sizes, be sure to check the width on '
'all\n'
' supported builds. An easy way to do this is with '
'"python -c\n'
' "import sys; print(sys.hash_info.width)"".\n'
'\n'
' If a class does not define an "__eq__()" method it should '
'not\n'
' define a "__hash__()" operation either; if it defines '
'"__eq__()"\n'
' but not "__hash__()", its instances will not be usable as '
'items in\n'
' hashable collections. If a class defines mutable objects '
'and\n'
' implements an "__eq__()" method, it should not implement\n'
' "__hash__()", since the implementation of hashable '
'collections\n'
' requires that a key’s hash value is immutable (if the '
'object’s hash\n'
' value changes, it will be in the wrong hash bucket).\n'
'\n'
' User-defined classes have "__eq__()" and "__hash__()" '
'methods by\n'
' default; with them, all objects compare unequal (except '
'with\n'
' themselves) and "x.__hash__()" returns an appropriate '
'value such\n'
' that "x == y" implies both that "x is y" and "hash(x) == '
'hash(y)".\n'
'\n'
' A class that overrides "__eq__()" and does not define '
'"__hash__()"\n'
' will have its "__hash__()" implicitly set to "None". '
'When the\n'
' "__hash__()" method of a class is "None", instances of '
'the class\n'
' will raise an appropriate "TypeError" when a program '
'attempts to\n'
' retrieve their hash value, and will also be correctly '
'identified as\n'
' unhashable when checking "isinstance(obj, '
'collections.Hashable)".\n'
'\n'
' If a class that overrides "__eq__()" needs to retain the\n'
' implementation of "__hash__()" from a parent class, the '
'interpreter\n'
' must be told this explicitly by setting "__hash__ =\n'
' <ParentClass>.__hash__".\n'
'\n'
' If a class that does not override "__eq__()" wishes to '
'suppress\n'
' hash support, it should include "__hash__ = None" in the '
'class\n'
' definition. A class which defines its own "__hash__()" '
'that\n'
' explicitly raises a "TypeError" would be incorrectly '
'identified as\n'
' hashable by an "isinstance(obj, collections.Hashable)" '
'call.\n'
'\n'
' Note: By default, the "__hash__()" values of str, bytes '
'and\n'
' datetime objects are “salted” with an unpredictable '
'random value.\n'
' Although they remain constant within an individual '
'Python\n'
' process, they are not predictable between repeated '
'invocations of\n'
'     Python. This is intended to provide protection against a '
'denial-\n'
' of-service caused by carefully-chosen inputs that '
'exploit the\n'
' worst case performance of a dict insertion, O(n^2) '
'complexity.\n'
' See http://www.ocert.org/advisories/ocert-2011-003.html '
'for\n'
'     details. Changing hash values affects the iteration '
'order of\n'
' dicts, sets and other mappings. Python has never made '
'guarantees\n'
' about this ordering (and it typically varies between '
'32-bit and\n'
'     64-bit builds). See also "PYTHONHASHSEED".\n'
'\n'
' Changed in version 3.3: Hash randomization is enabled by '
'default.\n'
'\n'
'object.__bool__(self)\n'
'\n'
' Called to implement truth value testing and the built-in '
'operation\n'
' "bool()"; should return "False" or "True". When this '
'method is not\n'
' defined, "__len__()" is called, if it is defined, and the '
'object is\n'
' considered true if its result is nonzero. If a class '
'defines\n'
' neither "__len__()" nor "__bool__()", all its instances '
'are\n'
' considered true.\n'
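'\n'
'   For illustration (the class is hypothetical):\n'
'\n'
'      class Gate:\n'
'          def __init__(self, is_open):\n'
'              self._is_open = is_open\n'
'\n'
'          def __bool__(self):\n'
'              return self._is_open\n'
'\n'
'      bool(Gate(False))    # False\n'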
'\n'
'\n'
'Customizing attribute access\n'
'============================\n'
'\n'
'The following methods can be defined to customize the '
'meaning of\n'
'attribute access (use of, assignment to, or deletion of '
'"x.name") for\n'
'class instances.\n'
'\n'
'object.__getattr__(self, name)\n'
'\n'
' Called when the default attribute access fails with an\n'
' "AttributeError" (either "__getattribute__()" raises an\n'
' "AttributeError" because *name* is not an instance '
'attribute or an\n'
' attribute in the class tree for "self"; or "__get__()" of '
'a *name*\n'
' property raises "AttributeError"). This method should '
'either\n'
' return the (computed) attribute value or raise an '
'"AttributeError"\n'
' exception.\n'
'\n'
' Note that if the attribute is found through the normal '
'mechanism,\n'
' "__getattr__()" is not called. (This is an intentional '
'asymmetry\n'
' between "__getattr__()" and "__setattr__()".) This is '
'done both for\n'
' efficiency reasons and because otherwise "__getattr__()" '
'would have\n'
' no way to access other attributes of the instance. Note '
'that at\n'
' least for instance variables, you can fake total control '
'by not\n'
' inserting any values in the instance attribute dictionary '
'(but\n'
' instead inserting them in another object). See the\n'
' "__getattribute__()" method below for a way to actually '
'get total\n'
' control over attribute access.\n'
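'\n'
'   A minimal sketch computing a missing attribute lazily (the names,\n'
'   including the "compute()" helper, are hypothetical):\n'
'\n'
'      class Lazy:\n'
'          def __getattr__(self, name):\n'
"              if name == 'expensive':\n"
'                  value = compute()          # hypothetical helper\n'
'                  self.expensive = value     # cache it; __getattr__\n'
'                  return value               # is not called again\n'
'              raise AttributeError(name)\n'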
'\n'
'object.__getattribute__(self, name)\n'
'\n'
' Called unconditionally to implement attribute accesses '
'for\n'
' instances of the class. If the class also defines '
'"__getattr__()",\n'
' the latter will not be called unless "__getattribute__()" '
'either\n'
' calls it explicitly or raises an "AttributeError". This '
'method\n'
' should return the (computed) attribute value or raise an\n'
' "AttributeError" exception. In order to avoid infinite '
'recursion in\n'
' this method, its implementation should always call the '
'base class\n'
' method with the same name to access any attributes it '
'needs, for\n'
' example, "object.__getattribute__(self, name)".\n'
'\n'
' Note: This method may still be bypassed when looking up '
'special\n'
' methods as the result of implicit invocation via '
'language syntax\n'
' or built-in functions. See Special method lookup.\n'
'\n'
'object.__setattr__(self, name, value)\n'
'\n'
' Called when an attribute assignment is attempted. This '
'is called\n'
' instead of the normal mechanism (i.e. store the value in '
'the\n'
' instance dictionary). *name* is the attribute name, '
'*value* is the\n'
' value to be assigned to it.\n'
'\n'
' If "__setattr__()" wants to assign to an instance '
'attribute, it\n'
' should call the base class method with the same name, for '
'example,\n'
' "object.__setattr__(self, name, value)".\n'
'\n'
'object.__delattr__(self, name)\n'
'\n'
' Like "__setattr__()" but for attribute deletion instead '
'of\n'
' assignment. This should only be implemented if "del '
'obj.name" is\n'
' meaningful for the object.\n'
'\n'
'object.__dir__(self)\n'
'\n'
' Called when "dir()" is called on the object. A sequence '
'must be\n'
' returned. "dir()" converts the returned sequence to a '
'list and\n'
' sorts it.\n'
'\n'
'\n'
'Customizing module attribute access\n'
'-----------------------------------\n'
'\n'
'For a more fine-grained customization of the module behavior '
'(setting\n'
'attributes, properties, etc.), one can set the "__class__" '
'attribute\n'
'of a module object to a subclass of "types.ModuleType". For '
'example:\n'
'\n'
' import sys\n'
' from types import ModuleType\n'
'\n'
' class VerboseModule(ModuleType):\n'
' def __repr__(self):\n'
" return f'Verbose {self.__name__}'\n"
'\n'
' def __setattr__(self, attr, value):\n'
" print(f'Setting {attr}...')\n"
'           super().__setattr__(attr, value)\n'
'\n'
' sys.modules[__name__].__class__ = VerboseModule\n'
'\n'
'Note: Setting module "__class__" only affects lookups made '
'using the\n'
' attribute access syntax – directly accessing the module '
'globals\n'
' (whether by code within the module, or via a reference to '
'the\n'
' module’s globals dictionary) is unaffected.\n'
'\n'
'Changed in version 3.5: "__class__" module attribute is now '
'writable.\n'
'\n'
'\n'
'Implementing Descriptors\n'
'------------------------\n'
'\n'
'The following methods only apply when an instance of the '
'class\n'
'containing the method (a so-called *descriptor* class) '
'appears in an\n'
'*owner* class (the descriptor must be in either the owner’s '
'class\n'
'dictionary or in the class dictionary for one of its '
'parents). In the\n'
'examples below, “the attribute” refers to the attribute '
'whose name is\n'
'the key of the property in the owner class’ "__dict__".\n'
'\n'
'object.__get__(self, instance, owner)\n'
'\n'
' Called to get the attribute of the owner class (class '
'attribute\n'
' access) or of an instance of that class (instance '
'attribute\n'
' access). *owner* is always the owner class, while '
'*instance* is the\n'
' instance that the attribute was accessed through, or '
'"None" when\n'
' the attribute is accessed through the *owner*. This '
'method should\n'
' return the (computed) attribute value or raise an '
'"AttributeError"\n'
' exception.\n'
'\n'
'object.__set__(self, instance, value)\n'
'\n'
' Called to set the attribute on an instance *instance* of '
'the owner\n'
' class to a new value, *value*.\n'
'\n'
'object.__delete__(self, instance)\n'
'\n'
' Called to delete the attribute on an instance *instance* '
'of the\n'
' owner class.\n'
'\n'
'object.__set_name__(self, owner, name)\n'
'\n'
' Called at the time the owning class *owner* is created. '
'The\n'
' descriptor has been assigned to *name*.\n'
'\n'
' New in version 3.6.\n'
'\n'
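'As an illustration (a minimal sketch; "PositiveNumber" and\n'
'"Account" are invented names), a data descriptor can validate\n'
'assignments and store the value in the instance dictionary:\n'
'\n'
'   class PositiveNumber:\n'
'       def __set_name__(self, owner, name):\n'
'           self.name = name\n'
'\n'
'       def __get__(self, instance, owner):\n'
'           if instance is None:\n'
'               return self\n'
'           return instance.__dict__[self.name]\n'
'\n'
'       def __set__(self, instance, value):\n'
'           if value <= 0:\n'
"               raise ValueError('must be positive')\n"
'           instance.__dict__[self.name] = value\n'
'\n'
'   class Account:\n'
'       balance = PositiveNumber()\n'
'\n'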
'The attribute "__objclass__" is interpreted by the "inspect" '
'module as\n'
'specifying the class where this object was defined (setting '
'this\n'
'appropriately can assist in runtime introspection of dynamic '
'class\n'
'attributes). For callables, it may indicate that an instance '
'of the\n'
'given type (or a subclass) is expected or required as the '
'first\n'
'positional argument (for example, CPython sets this '
'attribute for\n'
'unbound methods that are implemented in C).\n'
'\n'
'\n'
'Invoking Descriptors\n'
'--------------------\n'
'\n'
'In general, a descriptor is an object attribute with '
'“binding\n'
'behavior”, one whose attribute access has been overridden by '
'methods\n'
'in the descriptor protocol: "__get__()", "__set__()", and\n'
'"__delete__()". If any of those methods are defined for an '
'object, it\n'
'is said to be a descriptor.\n'
'\n'
'The default behavior for attribute access is to get, set, or '
'delete\n'
'the attribute from an object’s dictionary. For instance, '
'"a.x" has a\n'
'lookup chain starting with "a.__dict__[\'x\']", then\n'
'"type(a).__dict__[\'x\']", and continuing through the base '
'classes of\n'
'"type(a)" excluding metaclasses.\n'
'\n'
'However, if the looked-up value is an object defining one of '
'the\n'
'descriptor methods, then Python may override the default '
'behavior and\n'
'invoke the descriptor method instead. Where this occurs in '
'the\n'
'precedence chain depends on which descriptor methods were '
'defined and\n'
'how they were called.\n'
'\n'
'The starting point for descriptor invocation is a binding, '
'"a.x". How\n'
'the arguments are assembled depends on "a":\n'
'\n'
'Direct Call\n'
' The simplest and least common call is when user code '
'directly\n'
' invokes a descriptor method: "x.__get__(a)".\n'
'\n'
'Instance Binding\n'
' If binding to an object instance, "a.x" is transformed '
'into the\n'
' call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n'
'\n'
'Class Binding\n'
' If binding to a class, "A.x" is transformed into the '
'call:\n'
' "A.__dict__[\'x\'].__get__(None, A)".\n'
'\n'
'Super Binding\n'
' If "a" is an instance of "super", then the binding '
'"super(B,\n'
' obj).m()" searches "obj.__class__.__mro__" for the base '
'class "A"\n'
' immediately preceding "B" and then invokes the descriptor '
'with the\n'
' call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n'
'\n'
'For instance bindings, the precedence of descriptor '
'invocation depends\n'
'on which descriptor methods are defined. A descriptor '
'can define\n'
'any combination of "__get__()", "__set__()" and '
'"__delete__()". If it\n'
'does not define "__get__()", then accessing the attribute '
'will return\n'
'the descriptor object itself unless there is a value in the '
'object’s\n'
'instance dictionary. If the descriptor defines "__set__()" '
'and/or\n'
'"__delete__()", it is a data descriptor; if it defines '
'neither, it is\n'
'a non-data descriptor. Normally, data descriptors define '
'both\n'
'"__get__()" and "__set__()", while non-data descriptors have '
'just the\n'
'"__get__()" method. Data descriptors with "__set__()" and '
'"__get__()"\n'
'defined always override a redefinition in an instance '
'dictionary. In\n'
'contrast, non-data descriptors can be overridden by '
'instances.\n'
'\n'
'Python methods (including "staticmethod()" and '
'"classmethod()") are\n'
'implemented as non-data descriptors. Accordingly, instances '
'can\n'
'redefine and override methods. This allows individual '
'instances to\n'
'acquire behaviors that differ from other instances of the '
'same class.\n'
'\n'
'The "property()" function is implemented as a data '
'descriptor.\n'
'Accordingly, instances cannot override the behavior of a '
'property.\n'
'\n'
'\n'
'__slots__\n'
'---------\n'
'\n'
'*__slots__* allow us to explicitly declare data members '
'(like\n'
'properties) and deny the creation of *__dict__* and '
'*__weakref__*\n'
'(unless explicitly declared in *__slots__* or available in a '
'parent.)\n'
'\n'
'The space saved over using *__dict__* can be significant.\n'
'\n'
'object.__slots__\n'
'\n'
' This class variable can be assigned a string, iterable, '
'or sequence\n'
' of strings with variable names used by instances. '
'*__slots__*\n'
' reserves space for the declared variables and prevents '
'the\n'
' automatic creation of *__dict__* and *__weakref__* for '
'each\n'
' instance.\n'
'\n'
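'As an illustration (a minimal sketch):\n'
'\n'
'   >>> class Point:\n'
"   ...     __slots__ = ('x', 'y')\n"
'   ...\n'
'   >>> p = Point()\n'
'   >>> p.x = 1                 # listed in __slots__\n'
'   >>> p.z = 2                 # not listed, and no __dict__\n'
'   Traceback (most recent call last):\n'
'     ...\n'
"   AttributeError: 'Point' object has no attribute 'z'\n"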
'\n'
'Notes on using *__slots__*\n'
'~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
'\n'
'* When inheriting from a class without *__slots__*, the '
'*__dict__*\n'
' and *__weakref__* attribute of the instances will always '
'be\n'
' accessible.\n'
'\n'
'* Without a *__dict__* variable, instances cannot be '
'assigned new\n'
' variables not listed in the *__slots__* definition. '
'Attempts to\n'
' assign to an unlisted variable name raises '
'"AttributeError". If\n'
' dynamic assignment of new variables is desired, then add\n'
' "\'__dict__\'" to the sequence of strings in the '
'*__slots__*\n'
' declaration.\n'
'\n'
'* Without a *__weakref__* variable for each instance, '
'classes\n'
' defining *__slots__* do not support weak references to '
'its\n'
' instances. If weak reference support is needed, then add\n'
' "\'__weakref__\'" to the sequence of strings in the '
'*__slots__*\n'
' declaration.\n'
'\n'
'* *__slots__* are implemented at the class level by '
'creating\n'
' descriptors (Implementing Descriptors) for each variable '
'name. As a\n'
' result, class attributes cannot be used to set default '
'values for\n'
' instance variables defined by *__slots__*; otherwise, the '
'class\n'
' attribute would overwrite the descriptor assignment.\n'
'\n'
'* The action of a *__slots__* declaration is not limited to '
'the\n'
' class where it is defined. *__slots__* declared in '
'parents are\n'
' available in child classes. However, child subclasses will '
'get a\n'
' *__dict__* and *__weakref__* unless they also define '
'*__slots__*\n'
' (which should only contain names of any *additional* '
'slots).\n'
'\n'
'* If a class defines a slot also defined in a base class, '
'the\n'
' instance variable defined by the base class slot is '
'inaccessible\n'
' (except by retrieving its descriptor directly from the '
'base class).\n'
' This renders the meaning of the program undefined. In the '
'future, a\n'
' check may be added to prevent this.\n'
'\n'
'* Nonempty *__slots__* does not work for classes derived '
'from\n'
' “variable-length” built-in types such as "int", "bytes" '
'and "tuple".\n'
'\n'
'* Any non-string iterable may be assigned to *__slots__*. '
'Mappings\n'
' may also be used; however, in the future, special meaning '
'may be\n'
' assigned to the values corresponding to each key.\n'
'\n'
'* *__class__* assignment works only if both classes have the '
'same\n'
' *__slots__*.\n'
'\n'
'* Multiple inheritance with multiple slotted parent classes '
'can be\n'
' used, but only one parent is allowed to have attributes '
'created by\n'
' slots (the other bases must have empty slot layouts) - '
'violations\n'
' raise "TypeError".\n'
'\n'
'\n'
'Customizing class creation\n'
'==========================\n'
'\n'
'Whenever a class inherits from another class, '
'*__init_subclass__* is\n'
'called on that class. This way, it is possible to write '
'classes which\n'
'change the behavior of subclasses. This is closely related '
'to class\n'
'decorators, but where class decorators only affect the '
'specific class\n'
'they’re applied to, "__init_subclass__" solely applies to '
'future\n'
'subclasses of the class defining the method.\n'
'\n'
'classmethod object.__init_subclass__(cls)\n'
'\n'
' This method is called whenever the containing class is '
'subclassed.\n'
' *cls* is then the new subclass. If defined as a normal '
'instance\n'
' method, this method is implicitly converted to a class '
'method.\n'
'\n'
' Keyword arguments which are given to a new class are '
'passed to the\n'
' parent’s class "__init_subclass__". For compatibility '
'with other\n'
' classes using "__init_subclass__", one should take out '
'the needed\n'
' keyword arguments and pass the others over to the base '
'class, as\n'
' in:\n'
'\n'
' class Philosopher:\n'
' def __init_subclass__(cls, default_name, '
'**kwargs):\n'
' super().__init_subclass__(**kwargs)\n'
' cls.default_name = default_name\n'
'\n'
' class AustralianPhilosopher(Philosopher, '
'default_name="Bruce"):\n'
' pass\n'
'\n'
' The default implementation "object.__init_subclass__" '
'does nothing,\n'
' but raises an error if it is called with any arguments.\n'
'\n'
' Note: The metaclass hint "metaclass" is consumed by the '
'rest of\n'
' the type machinery, and is never passed to '
'"__init_subclass__"\n'
' implementations. The actual metaclass (rather than the '
'explicit\n'
' hint) can be accessed as "type(cls)".\n'
'\n'
' New in version 3.6.\n'
'\n'
'\n'
'Metaclasses\n'
'-----------\n'
'\n'
'By default, classes are constructed using "type()". The '
'class body is\n'
'executed in a new namespace and the class name is bound '
'locally to the\n'
'result of "type(name, bases, namespace)".\n'
'\n'
'The class creation process can be customized by passing the\n'
'"metaclass" keyword argument in the class definition line, '
'or by\n'
'inheriting from an existing class that included such an '
'argument. In\n'
'the following example, both "MyClass" and "MySubclass" are '
'instances\n'
'of "Meta":\n'
'\n'
' class Meta(type):\n'
' pass\n'
'\n'
' class MyClass(metaclass=Meta):\n'
' pass\n'
'\n'
' class MySubclass(MyClass):\n'
' pass\n'
'\n'
'Any other keyword arguments that are specified in the class '
'definition\n'
'are passed through to all metaclass operations described '
'below.\n'
'\n'
'When a class definition is executed, the following steps '
'occur:\n'
'\n'
'* the appropriate metaclass is determined\n'
'\n'
'* the class namespace is prepared\n'
'\n'
'* the class body is executed\n'
'\n'
'* the class object is created\n'
'\n'
'\n'
'Determining the appropriate metaclass\n'
'-------------------------------------\n'
'\n'
'The appropriate metaclass for a class definition is '
'determined as\n'
'follows:\n'
'\n'
'* if no bases and no explicit metaclass are given, then '
'"type()" is\n'
' used\n'
'\n'
'* if an explicit metaclass is given and it is *not* an '
'instance of\n'
' "type()", then it is used directly as the metaclass\n'
'\n'
'* if an instance of "type()" is given as the explicit '
'metaclass, or\n'
' bases are defined, then the most derived metaclass is '
'used\n'
'\n'
'The most derived metaclass is selected from the explicitly '
'specified\n'
'metaclass (if any) and the metaclasses (i.e. "type(cls)") of '
'all\n'
'specified base classes. The most derived metaclass is one '
'which is a\n'
'subtype of *all* of these candidate metaclasses. If none of '
'the\n'
'candidate metaclasses meets that criterion, then the class '
'definition\n'
'will fail with "TypeError".\n'
'\n'
'\n'
'Preparing the class namespace\n'
'-----------------------------\n'
'\n'
'Once the appropriate metaclass has been identified, then the '
'class\n'
'namespace is prepared. If the metaclass has a "__prepare__" '
'attribute,\n'
'it is called as "namespace = metaclass.__prepare__(name, '
'bases,\n'
'**kwds)" (where the additional keyword arguments, if any, '
'come from\n'
'the class definition).\n'
'\n'
'If the metaclass has no "__prepare__" attribute, then the '
'class\n'
'namespace is initialised as an empty ordered mapping.\n'
'\n'
'See also:\n'
'\n'
' **PEP 3115** - Metaclasses in Python 3000\n'
' Introduced the "__prepare__" namespace hook\n'
'\n'
'\n'
'Executing the class body\n'
'------------------------\n'
'\n'
'The class body is executed (approximately) as "exec(body, '
'globals(),\n'
'namespace)". The key difference from a normal call to '
'"exec()" is that\n'
'lexical scoping allows the class body (including any '
'methods) to\n'
'reference names from the current and outer scopes when the '
'class\n'
'definition occurs inside a function.\n'
'\n'
'However, even when the class definition occurs inside the '
'function,\n'
'methods defined inside the class still cannot see names '
'defined at the\n'
'class scope. Class variables must be accessed through the '
'first\n'
'parameter of instance or class methods, or through the '
'implicit\n'
'lexically scoped "__class__" reference described in the next '
'section.\n'
'\n'
'\n'
'Creating the class object\n'
'-------------------------\n'
'\n'
'Once the class namespace has been populated by executing the '
'class\n'
'body, the class object is created by calling '
'"metaclass(name, bases,\n'
'namespace, **kwds)" (the additional keywords passed here are '
'the same\n'
'as those passed to "__prepare__").\n'
'\n'
'This class object is the one that will be referenced by the '
'zero-\n'
'argument form of "super()". "__class__" is an implicit '
'closure\n'
'reference created by the compiler if any methods in a class '
'body refer\n'
'to either "__class__" or "super". This allows the zero '
'argument form\n'
'of "super()" to correctly identify the class being defined '
'based on\n'
'lexical scoping, while the class or instance that was used '
'to make the\n'
'current call is identified based on the first argument '
'passed to the\n'
'method.\n'
'\n'
'**CPython implementation detail:** In CPython 3.6 and later, '
'the\n'
'"__class__" cell is passed to the metaclass as a '
'"__classcell__" entry\n'
'in the class namespace. If present, this must be propagated '
'up to the\n'
'"type.__new__" call in order for the class to be '
'initialised\n'
'correctly. Failing to do so will result in a '
'"DeprecationWarning" in\n'
'Python 3.6, and a "RuntimeError" in Python 3.8.\n'
'\n'
'When using the default metaclass "type", or any metaclass '
'that\n'
'ultimately calls "type.__new__", the following additional\n'
'customisation steps are invoked after creating the class '
'object:\n'
'\n'
'* first, "type.__new__" collects all of the descriptors in '
'the class\n'
' namespace that define a "__set_name__()" method;\n'
'\n'
'* second, all of these "__set_name__" methods are called '
'with the\n'
' class being defined and the assigned name of that '
'particular\n'
' descriptor; and\n'
'\n'
'* finally, the "__init_subclass__()" hook is called on the '
'immediate\n'
' parent of the new class in its method resolution order.\n'
'\n'
'After the class object is created, it is passed to the '
'class\n'
'decorators included in the class definition (if any) and the '
'resulting\n'
'object is bound in the local namespace as the defined '
'class.\n'
'\n'
'When a new class is created by "type.__new__", the object '
'provided as\n'
'the namespace parameter is copied to a new ordered mapping '
'and the\n'
'original object is discarded. The new copy is wrapped in a '
'read-only\n'
'proxy, which becomes the "__dict__" attribute of the class '
'object.\n'
'\n'
'See also:\n'
'\n'
' **PEP 3135** - New super\n'
' Describes the implicit "__class__" closure reference\n'
'\n'
'\n'
'Metaclass example\n'
'-----------------\n'
'\n'
'The potential uses for metaclasses are boundless. Some ideas '
'that have\n'
'been explored include enum, logging, interface checking, '
'automatic\n'
'delegation, automatic property creation, proxies, '
'frameworks, and\n'
'automatic resource locking/synchronization.\n'
'\n'
'Here is an example of a metaclass that uses an\n'
'"collections.OrderedDict" to remember the order that class '
'variables\n'
'are defined:\n'
'\n'
'   import collections\n'
'\n'
'   class OrderedClass(type):\n'
'\n'
' @classmethod\n'
' def __prepare__(metacls, name, bases, **kwds):\n'
' return collections.OrderedDict()\n'
'\n'
' def __new__(cls, name, bases, namespace, **kwds):\n'
' result = type.__new__(cls, name, bases, '
'dict(namespace))\n'
' result.members = tuple(namespace)\n'
' return result\n'
'\n'
' class A(metaclass=OrderedClass):\n'
' def one(self): pass\n'
' def two(self): pass\n'
' def three(self): pass\n'
' def four(self): pass\n'
'\n'
' >>> A.members\n'
" ('__module__', 'one', 'two', 'three', 'four')\n"
'\n'
'When the class definition for *A* gets executed, the process '
'begins\n'
'with calling the metaclass’s "__prepare__()" method which '
'returns an\n'
'empty "collections.OrderedDict". That mapping records the '
'methods and\n'
'attributes of *A* as they are defined within the body of the '
'class\n'
'statement. Once those definitions are executed, the ordered '
'dictionary\n'
'is fully populated and the metaclass’s "__new__()" method '
'gets\n'
'invoked. That method builds the new type and it saves the '
'ordered\n'
'dictionary keys in an attribute called "members".\n'
'\n'
'\n'
'Customizing instance and subclass checks\n'
'========================================\n'
'\n'
'The following methods are used to override the default '
'behavior of the\n'
'"isinstance()" and "issubclass()" built-in functions.\n'
'\n'
'In particular, the metaclass "abc.ABCMeta" implements these '
'methods in\n'
'order to allow the addition of Abstract Base Classes (ABCs) '
'as\n'
'“virtual base classes” to any class or type (including '
'built-in\n'
'types), including other ABCs.\n'
'\n'
'class.__instancecheck__(self, instance)\n'
'\n'
' Return true if *instance* should be considered a (direct '
'or\n'
' indirect) instance of *class*. If defined, called to '
'implement\n'
' "isinstance(instance, class)".\n'
'\n'
'class.__subclasscheck__(self, subclass)\n'
'\n'
' Return true if *subclass* should be considered a (direct '
'or\n'
' indirect) subclass of *class*. If defined, called to '
'implement\n'
' "issubclass(subclass, class)".\n'
'\n'
'Note that these methods are looked up on the type '
'(metaclass) of a\n'
'class. They cannot be defined as class methods in the '
'actual class.\n'
'This is consistent with the lookup of special methods that '
'are called\n'
'on instances, only in this case the instance is itself a '
'class.\n'
'\n'
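'As an illustration (a minimal sketch; "DuckMeta", "Duck" and\n'
'"Robot" are invented names), a metaclass can customize\n'
'"isinstance()":\n'
'\n'
'   class DuckMeta(type):\n'
'       def __instancecheck__(cls, instance):\n'
"           return hasattr(instance, 'quack')\n"
'\n'
'   class Duck(metaclass=DuckMeta):\n'
'       pass\n'
'\n'
'   class Robot:\n'
'       def quack(self):\n'
"           return 'beep'\n"
'\n'
'   >>> isinstance(Robot(), Duck)\n'
'   True\n'
'\n'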
'See also:\n'
'\n'
' **PEP 3119** - Introducing Abstract Base Classes\n'
' Includes the specification for customizing '
'"isinstance()" and\n'
' "issubclass()" behavior through "__instancecheck__()" '
'and\n'
' "__subclasscheck__()", with motivation for this '
'functionality in\n'
' the context of adding Abstract Base Classes (see the '
'"abc"\n'
' module) to the language.\n'
'\n'
'\n'
'Emulating callable objects\n'
'==========================\n'
'\n'
'object.__call__(self[, args...])\n'
'\n'
' Called when the instance is “called” as a function; if '
'this method\n'
' is defined, "x(arg1, arg2, ...)" is a shorthand for\n'
' "x.__call__(arg1, arg2, ...)".\n'
'\n'
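'   As an illustration (a minimal sketch; "Adder" is an invented\n'
'   name), defining "__call__()" makes instances callable:\n'
'\n'
'      class Adder:\n'
'          def __init__(self, n):\n'
'              self.n = n\n'
'\n'
'          def __call__(self, x):\n'
'              return self.n + x\n'
'\n'
'      >>> add5 = Adder(5)\n'
'      >>> add5(3)\n'
'      8\n'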
'\n'
'Emulating container types\n'
'=========================\n'
'\n'
'The following methods can be defined to implement container '
'objects.\n'
'Containers usually are sequences (such as lists or tuples) '
'or mappings\n'
'(like dictionaries), but can represent other containers as '
'well. The\n'
'first set of methods is used either to emulate a sequence or '
'to\n'
'emulate a mapping; the difference is that for a sequence, '
'the\n'
'allowable keys should be the integers *k* for which "0 <= k '
'< N" where\n'
'*N* is the length of the sequence, or slice objects, which '
'define a\n'
'range of items. It is also recommended that mappings '
'provide the\n'
'methods "keys()", "values()", "items()", "get()", '
'"clear()",\n'
'"setdefault()", "pop()", "popitem()", "copy()", and '
'"update()"\n'
'behaving similar to those for Python’s standard dictionary '
'objects.\n'
'The "collections" module provides a "MutableMapping" '
'abstract base\n'
'class to help create those methods from a base set of '
'"__getitem__()",\n'
'"__setitem__()", "__delitem__()", and "keys()". Mutable '
'sequences\n'
'should provide methods "append()", "count()", "index()", '
'"extend()",\n'
'"insert()", "pop()", "remove()", "reverse()" and "sort()", '
'like Python\n'
'standard list objects. Finally, sequence types should '
'implement\n'
'addition (meaning concatenation) and multiplication '
'(meaning\n'
'repetition) by defining the methods "__add__()", '
'"__radd__()",\n'
'"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" '
'described\n'
'below; they should not define other numerical operators. It '
'is\n'
'recommended that both mappings and sequences implement the\n'
'"__contains__()" method to allow efficient use of the "in" '
'operator;\n'
'for mappings, "in" should search the mapping’s keys; for '
'sequences, it\n'
'should search through the values. It is further recommended '
'that both\n'
'mappings and sequences implement the "__iter__()" method to '
'allow\n'
'efficient iteration through the container; for mappings, '
'"__iter__()"\n'
'should be the same as "keys()"; for sequences, it should '
'iterate\n'
'through the values.\n'
'\n'
'object.__len__(self)\n'
'\n'
' Called to implement the built-in function "len()". '
'Should return\n'
' the length of the object, an integer ">=" 0. Also, an '
'object that\n'
' doesn’t define a "__bool__()" method and whose '
'"__len__()" method\n'
' returns zero is considered to be false in a Boolean '
'context.\n'
'\n'
' **CPython implementation detail:** In CPython, the length '
'is\n'
' required to be at most "sys.maxsize". If the length is '
'larger than\n'
' "sys.maxsize" some features (such as "len()") may raise\n'
' "OverflowError". To prevent raising "OverflowError" by '
'truth value\n'
' testing, an object must define a "__bool__()" method.\n'
'\n'
'object.__length_hint__(self)\n'
'\n'
' Called to implement "operator.length_hint()". Should '
'return an\n'
' estimated length for the object (which may be greater or '
'less than\n'
' the actual length). The length must be an integer ">=" 0. '
'This\n'
' method is purely an optimization and is never required '
'for\n'
' correctness.\n'
'\n'
' New in version 3.4.\n'
'\n'
'Note: Slicing is done exclusively with the following three '
'methods.\n'
' A call like\n'
'\n'
' a[1:2] = b\n'
'\n'
' is translated to\n'
'\n'
' a[slice(1, 2, None)] = b\n'
'\n'
' and so forth. Missing slice items are always filled in '
'with "None".\n'
'\n'
'object.__getitem__(self, key)\n'
'\n'
' Called to implement evaluation of "self[key]". For '
'sequence types,\n'
' the accepted keys should be integers and slice objects. '
'Note that\n'
' the special interpretation of negative indexes (if the '
'class wishes\n'
' to emulate a sequence type) is up to the "__getitem__()" '
'method. If\n'
' *key* is of an inappropriate type, "TypeError" may be '
'raised; if of\n'
' a value outside the set of indexes for the sequence '
'(after any\n'
' special interpretation of negative values), "IndexError" '
'should be\n'
' raised. For mapping types, if *key* is missing (not in '
'the\n'
' container), "KeyError" should be raised.\n'
'\n'
' Note: "for" loops expect that an "IndexError" will be '
'raised for\n'
' illegal indexes to allow proper detection of the end of '
'the\n'
' sequence.\n'
'\n'
'object.__missing__(self, key)\n'
'\n'
' Called by "dict"."__getitem__()" to implement "self[key]" '
'for dict\n'
' subclasses when key is not in the dictionary.\n'
'\n'
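'   For example (an illustrative sketch; "Tally" is an invented\n'
'   name), a "dict" subclass can supply a default for missing\n'
'   keys:\n'
'\n'
'      class Tally(dict):\n'
'          def __missing__(self, key):\n'
'              return 0\n'
'\n'
'      >>> t = Tally()\n'
"      >>> t['red'] += 1\n"
"      >>> t['red']\n"
'      1\n'
'\n'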
'object.__setitem__(self, key, value)\n'
'\n'
' Called to implement assignment to "self[key]". Same note '
'as for\n'
' "__getitem__()". This should only be implemented for '
'mappings if\n'
' the objects support changes to the values for keys, or if '
'new keys\n'
' can be added, or for sequences if elements can be '
'replaced. The\n'
' same exceptions should be raised for improper *key* '
'values as for\n'
' the "__getitem__()" method.\n'
'\n'
'object.__delitem__(self, key)\n'
'\n'
' Called to implement deletion of "self[key]". Same note '
'as for\n'
' "__getitem__()". This should only be implemented for '
'mappings if\n'
' the objects support removal of keys, or for sequences if '
'elements\n'
' can be removed from the sequence. The same exceptions '
'should be\n'
' raised for improper *key* values as for the '
'"__getitem__()" method.\n'
'\n'
'object.__iter__(self)\n'
'\n'
' This method is called when an iterator is required for a '
'container.\n'
' This method should return a new iterator object that can '
'iterate\n'
' over all the objects in the container. For mappings, it '
'should\n'
' iterate over the keys of the container.\n'
'\n'
' Iterator objects also need to implement this method; they '
'are\n'
' required to return themselves. For more information on '
'iterator\n'
' objects, see Iterator Types.\n'
'\n'
'object.__reversed__(self)\n'
'\n'
' Called (if present) by the "reversed()" built-in to '
'implement\n'
' reverse iteration. It should return a new iterator '
'object that\n'
' iterates over all the objects in the container in reverse '
'order.\n'
'\n'
' If the "__reversed__()" method is not provided, the '
'"reversed()"\n'
' built-in will fall back to using the sequence protocol '
'("__len__()"\n'
' and "__getitem__()"). Objects that support the sequence '
'protocol\n'
' should only provide "__reversed__()" if they can provide '
'an\n'
' implementation that is more efficient than the one '
'provided by\n'
' "reversed()".\n'
'\n'
'The membership test operators ("in" and "not in") are '
'normally\n'
'implemented as an iteration through a sequence. However, '
'container\n'
'objects can supply the following special method with a more '
'efficient\n'
'implementation, which also does not require the object be a '
'sequence.\n'
'\n'
'object.__contains__(self, item)\n'
'\n'
' Called to implement membership test operators. Should '
'return true\n'
' if *item* is in *self*, false otherwise. For mapping '
'objects, this\n'
' should consider the keys of the mapping rather than the '
'values or\n'
' the key-item pairs.\n'
'\n'
' For objects that don’t define "__contains__()", the '
'membership test\n'
' first tries iteration via "__iter__()", then the old '
'sequence\n'
' iteration protocol via "__getitem__()", see this section '
'in the\n'
' language reference.\n'
'\n'
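'As an illustration (a minimal sketch; "Squares" is an invented\n'
'name), implementing "__len__()" and "__getitem__()" is enough to\n'
'behave like a read-only sequence, which also enables iteration\n'
'and membership tests via the fallbacks described above:\n'
'\n'
'   class Squares:\n'
'       def __init__(self, n):\n'
'           self.n = n\n'
'\n'
'       def __len__(self):\n'
'           return self.n\n'
'\n'
'       def __getitem__(self, i):\n'
'           if not 0 <= i < self.n:\n'
'               raise IndexError(i)\n'
'           return i * i\n'
'\n'
'   >>> list(Squares(4))\n'
'   [0, 1, 4, 9]\n'
'   >>> 9 in Squares(4)\n'
'   True\n'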
'\n'
'Emulating numeric types\n'
'=======================\n'
'\n'
'The following methods can be defined to emulate numeric '
'objects.\n'
'Methods corresponding to operations that are not supported '
'by the\n'
'particular kind of number implemented (e.g., bitwise '
'operations for\n'
'non-integral numbers) should be left undefined.\n'
'\n'
'object.__add__(self, other)\n'
'object.__sub__(self, other)\n'
'object.__mul__(self, other)\n'
'object.__matmul__(self, other)\n'
'object.__truediv__(self, other)\n'
'object.__floordiv__(self, other)\n'
'object.__mod__(self, other)\n'
'object.__divmod__(self, other)\n'
'object.__pow__(self, other[, modulo])\n'
'object.__lshift__(self, other)\n'
'object.__rshift__(self, other)\n'
'object.__and__(self, other)\n'
'object.__xor__(self, other)\n'
'object.__or__(self, other)\n'
'\n'
' These methods are called to implement the binary '
'arithmetic\n'
' operations ("+", "-", "*", "@", "/", "//", "%", '
'"divmod()",\n'
' "pow()", "**", "<<", ">>", "&", "^", "|"). For instance, '
'to\n'
' evaluate the expression "x + y", where *x* is an instance '
'of a\n'
' class that has an "__add__()" method, "x.__add__(y)" is '
'called.\n'
' The "__divmod__()" method should be the equivalent to '
'using\n'
' "__floordiv__()" and "__mod__()"; it should not be '
'related to\n'
' "__truediv__()". Note that "__pow__()" should be defined '
'to accept\n'
' an optional third argument if the ternary version of the '
'built-in\n'
' "pow()" function is to be supported.\n'
'\n'
' If one of those methods does not support the operation '
'with the\n'
' supplied arguments, it should return "NotImplemented".\n'
'\n'
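'   As an illustration (a minimal sketch; "Money" is an invented\n'
'   name), "__add__()" handles supported operands and returns\n'
'   "NotImplemented" for everything else:\n'
'\n'
'      class Money:\n'
'          def __init__(self, cents):\n'
'              self.cents = cents\n'
'\n'
'          def __add__(self, other):\n'
'              if isinstance(other, Money):\n'
'                  return Money(self.cents + other.cents)\n'
'              return NotImplemented\n'
'\n'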
'object.__radd__(self, other)\n'
'object.__rsub__(self, other)\n'
'object.__rmul__(self, other)\n'
'object.__rmatmul__(self, other)\n'
'object.__rtruediv__(self, other)\n'
'object.__rfloordiv__(self, other)\n'
'object.__rmod__(self, other)\n'
'object.__rdivmod__(self, other)\n'
'object.__rpow__(self, other)\n'
'object.__rlshift__(self, other)\n'
'object.__rrshift__(self, other)\n'
'object.__rand__(self, other)\n'
'object.__rxor__(self, other)\n'
'object.__ror__(self, other)\n'
'\n'
' These methods are called to implement the binary '
'arithmetic\n'
' operations ("+", "-", "*", "@", "/", "//", "%", '
'"divmod()",\n'
' "pow()", "**", "<<", ">>", "&", "^", "|") with reflected '
'(swapped)\n'
' operands. These functions are only called if the left '
'operand does\n'
' not support the corresponding operation [3] and the '
'operands are of\n'
' different types. [4] For instance, to evaluate the '
'expression "x -\n'
' y", where *y* is an instance of a class that has an '
'"__rsub__()"\n'
' method, "y.__rsub__(x)" is called if "x.__sub__(y)" '
'returns\n'
' *NotImplemented*.\n'
'\n'
' Note that ternary "pow()" will not try calling '
'"__rpow__()" (the\n'
' coercion rules would become too complicated).\n'
'\n'
' Note: If the right operand’s type is a subclass of the '
'left\n'
' operand’s type and that subclass provides the reflected '
'method\n'
' for the operation, this method will be called before '
'the left\n'
' operand’s non-reflected method. This behavior allows '
'subclasses\n'
' to override their ancestors’ operations.\n'
'\n'
'object.__iadd__(self, other)\n'
'object.__isub__(self, other)\n'
'object.__imul__(self, other)\n'
'object.__imatmul__(self, other)\n'
'object.__itruediv__(self, other)\n'
'object.__ifloordiv__(self, other)\n'
'object.__imod__(self, other)\n'
'object.__ipow__(self, other[, modulo])\n'
'object.__ilshift__(self, other)\n'
'object.__irshift__(self, other)\n'
'object.__iand__(self, other)\n'
'object.__ixor__(self, other)\n'
'object.__ior__(self, other)\n'
'\n'
' These methods are called to implement the augmented '
'arithmetic\n'
' assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", '
'"**=",\n'
' "<<=", ">>=", "&=", "^=", "|="). These methods should '
'attempt to\n'
' do the operation in-place (modifying *self*) and return '
'the result\n'
' (which could be, but does not have to be, *self*). If a '
'specific\n'
' method is not defined, the augmented assignment falls '
'back to the\n'
' normal methods. For instance, if *x* is an instance of a '
'class\n'
' with an "__iadd__()" method, "x += y" is equivalent to "x '
'=\n'
' x.__iadd__(y)" . Otherwise, "x.__add__(y)" and '
'"y.__radd__(x)" are\n'
' considered, as with the evaluation of "x + y". In '
'certain\n'
' situations, augmented assignment can result in unexpected '
'errors\n'
' (see Why does a_tuple[i] += [‘item’] raise an exception '
'when the\n'
' addition works?), but this behavior is in fact part of '
'the data\n'
' model.\n'
'\n'
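'   As an illustration (a minimal sketch; "Buffer" is an invented\n'
'   name), an in-place method should mutate *self* and return it:\n'
'\n'
'      class Buffer:\n'
'          def __init__(self):\n'
'              self.items = []\n'
'\n'
'          def __iadd__(self, other):\n'
'              self.items.extend(other)\n'
'              return self\n'
'\n'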
'object.__neg__(self)\n'
'object.__pos__(self)\n'
'object.__abs__(self)\n'
'object.__invert__(self)\n'
'\n'
' Called to implement the unary arithmetic operations ("-", '
'"+",\n'
' "abs()" and "~").\n'
'\n'
'object.__complex__(self)\n'
'object.__int__(self)\n'
'object.__float__(self)\n'
'\n'
' Called to implement the built-in functions "complex()", '
'"int()" and\n'
' "float()". Should return a value of the appropriate '
'type.\n'
'\n'
'object.__index__(self)\n'
'\n'
' Called to implement "operator.index()", and whenever '
'Python needs\n'
' to losslessly convert the numeric object to an integer '
'object (such\n'
' as in slicing, or in the built-in "bin()", "hex()" and '
'"oct()"\n'
' functions). Presence of this method indicates that the '
'numeric\n'
' object is an integer type. Must return an integer.\n'
'\n'
' Note: In order to have a coherent integer type class, '
'when\n'
' "__index__()" is defined "__int__()" should also be '
'defined, and\n'
' both should return the same value.\n'
'\n'
'object.__round__(self[, ndigits])\n'
'object.__trunc__(self)\n'
'object.__floor__(self)\n'
'object.__ceil__(self)\n'
'\n'
' Called to implement the built-in function "round()" and '
'"math"\n'
' functions "trunc()", "floor()" and "ceil()". Unless '
'*ndigits* is\n'
' passed to "__round__()" all these methods should return '
'the value\n'
' of the object truncated to an "Integral" (typically an '
'"int").\n'
'\n'
' If "__int__()" is not defined then the built-in function '
'"int()"\n'
' falls back to "__trunc__()".\n'
'\n'
'\n'
'With Statement Context Managers\n'
'===============================\n'
'\n'
'A *context manager* is an object that defines the runtime '
'context to\n'
'be established when executing a "with" statement. The '
'context manager\n'
'handles the entry into, and the exit from, the desired '
'runtime context\n'
'for the execution of the block of code. Context managers '
'are normally\n'
'invoked using the "with" statement (described in section The '
'with\n'
'statement), but can also be used by directly invoking their '
'methods.\n'
'\n'
'Typical uses of context managers include saving and '
'restoring various\n'
'kinds of global state, locking and unlocking resources, '
'closing opened\n'
'files, etc.\n'
'\n'
'For more information on context managers, see Context '
'Manager Types.\n'
'\n'
'object.__enter__(self)\n'
'\n'
' Enter the runtime context related to this object. The '
'"with"\n'
' statement will bind this method’s return value to the '
'target(s)\n'
' specified in the "as" clause of the statement, if any.\n'
'\n'
'object.__exit__(self, exc_type, exc_value, traceback)\n'
'\n'
' Exit the runtime context related to this object. The '
'parameters\n'
' describe the exception that caused the context to be '
'exited. If the\n'
' context was exited without an exception, all three '
'arguments will\n'
' be "None".\n'
'\n'
' If an exception is supplied, and the method wishes to '
'suppress the\n'
' exception (i.e., prevent it from being propagated), it '
'should\n'
' return a true value. Otherwise, the exception will be '
'processed\n'
' normally upon exit from this method.\n'
'\n'
' Note that "__exit__()" methods should not reraise the '
'passed-in\n'
' exception; this is the caller’s responsibility.\n'
'\n'
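'As an illustration (a minimal sketch; "Timer" is an invented\n'
'name), a context manager can bracket a block of code:\n'
'\n'
'   import time\n'
'\n'
'   class Timer:\n'
'       def __enter__(self):\n'
'           self.start = time.monotonic()\n'
'           return self\n'
'\n'
'       def __exit__(self, exc_type, exc_value, traceback):\n'
'           self.elapsed = time.monotonic() - self.start\n'
'           return False   # do not suppress exceptions\n'
'\n'
'   with Timer() as t:\n'
'       sum(range(1000))\n'
'\n'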
'See also:\n'
'\n'
' **PEP 343** - The “with” statement\n'
' The specification, background, and examples for the '
'Python "with"\n'
' statement.\n'
'\n'
'\n'
'Special method lookup\n'
'=====================\n'
'\n'
'For custom classes, implicit invocations of special methods '
'are only\n'
'guaranteed to work correctly if defined on an object’s type, '
'not in\n'
'the object’s instance dictionary. That behaviour is the '
'reason why\n'
'the following code raises an exception:\n'
'\n'
' >>> class C:\n'
' ... pass\n'
' ...\n'
' >>> c = C()\n'
' >>> c.__len__ = lambda: 5\n'
' >>> len(c)\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 1, in <module>\n'
" TypeError: object of type 'C' has no len()\n"
'\n'
'The rationale behind this behaviour lies with a number of '
'special\n'
'methods such as "__hash__()" and "__repr__()" that are '
'implemented by\n'
'all objects, including type objects. If the implicit lookup '
'of these\n'
'methods used the conventional lookup process, they would '
'fail when\n'
'invoked on the type object itself:\n'
'\n'
' >>> 1 .__hash__() == hash(1)\n'
' True\n'
' >>> int.__hash__() == hash(int)\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 1, in <module>\n'
" TypeError: descriptor '__hash__' of 'int' object needs an "
'argument\n'
'\n'
'Incorrectly attempting to invoke an unbound method of a '
'class in this\n'
'way is sometimes referred to as ‘metaclass confusion’, and '
'is avoided\n'
'by bypassing the instance when looking up special methods:\n'
'\n'
' >>> type(1).__hash__(1) == hash(1)\n'
' True\n'
' >>> type(int).__hash__(int) == hash(int)\n'
' True\n'
'\n'
'In addition to bypassing any instance attributes in the '
'interest of\n'
'correctness, implicit special method lookup generally also '
'bypasses\n'
'the "__getattribute__()" method even of the object’s '
'metaclass:\n'
'\n'
' >>> class Meta(type):\n'
' ... def __getattribute__(*args):\n'
' ... print("Metaclass getattribute invoked")\n'
' ... return type.__getattribute__(*args)\n'
' ...\n'
' >>> class C(object, metaclass=Meta):\n'
' ... def __len__(self):\n'
' ... return 10\n'
' ... def __getattribute__(*args):\n'
' ... print("Class getattribute invoked")\n'
' ... return object.__getattribute__(*args)\n'
' ...\n'
' >>> c = C()\n'
' >>> c.__len__() # Explicit lookup via '
'instance\n'
' Class getattribute invoked\n'
' 10\n'
' >>> type(c).__len__(c) # Explicit lookup via '
'type\n'
' Metaclass getattribute invoked\n'
' 10\n'
' >>> len(c) # Implicit lookup\n'
' 10\n'
'\n'
'Bypassing the "__getattribute__()" machinery in this fashion '
'provides\n'
'significant scope for speed optimisations within the '
'interpreter, at\n'
'the cost of some flexibility in the handling of special '
'methods (the\n'
'special method *must* be set on the class object itself in '
'order to be\n'
'consistently invoked by the interpreter).\n',
'string-methods': 'String Methods\n'
'**************\n'
'\n'
'Strings implement all of the common sequence operations, '
'along with\n'
'the additional methods described below.\n'
'\n'
'Strings also support two styles of string formatting, one '
'providing a\n'
'large degree of flexibility and customization (see '
'"str.format()",\n'
'Format String Syntax and Custom String Formatting) and the '
'other based\n'
'on C "printf" style formatting that handles a narrower '
'range of types\n'
'and is slightly harder to use correctly, but is often '
'faster for the\n'
'cases it can handle (printf-style String Formatting).\n'
'\n'
'The Text Processing Services section of the standard '
'library covers a\n'
'number of other modules that provide various text related '
'utilities\n'
'(including regular expression support in the "re" '
'module).\n'
'\n'
'str.capitalize()\n'
'\n'
' Return a copy of the string with its first character '
'capitalized\n'
' and the rest lowercased.\n'
'\n'
'str.casefold()\n'
'\n'
' Return a casefolded copy of the string. Casefolded '
'strings may be\n'
' used for caseless matching.\n'
'\n'
' Casefolding is similar to lowercasing but more '
'aggressive because\n'
' it is intended to remove all case distinctions in a '
'string. For\n'
' example, the German lowercase letter "\'ß\'" is '
'equivalent to ""ss"".\n'
' Since it is already lowercase, "lower()" would do '
'nothing to "\'ß\'";\n'
' "casefold()" converts it to ""ss"".\n'
'\n'
' The casefolding algorithm is described in section 3.13 '
'of the\n'
' Unicode Standard.\n'
'\n'
' New in version 3.3.\n'
'\n'
'str.center(width[, fillchar])\n'
'\n'
'   Return the string centered in a string of length *width*. '
'is done\n'
' using the specified *fillchar* (default is an ASCII '
'space). The\n'
' original string is returned if *width* is less than or '
'equal to\n'
' "len(s)".\n'
'\n'
'str.count(sub[, start[, end]])\n'
'\n'
' Return the number of non-overlapping occurrences of '
'substring *sub*\n'
' in the range [*start*, *end*]. Optional arguments '
'*start* and\n'
' *end* are interpreted as in slice notation.\n'
'\n'
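'   For example:\n'
'\n'
"      >>> 'banana'.count('an')\n"
'      2\n'
'\n'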
'str.encode(encoding="utf-8", errors="strict")\n'
'\n'
' Return an encoded version of the string as a bytes '
'object. Default\n'
' encoding is "\'utf-8\'". *errors* may be given to set a '
'different\n'
' error handling scheme. The default for *errors* is '
'"\'strict\'",\n'
' meaning that encoding errors raise a "UnicodeError". '
'Other possible\n'
' values are "\'ignore\'", "\'replace\'", '
'"\'xmlcharrefreplace\'",\n'
' "\'backslashreplace\'" and any other name registered '
'via\n'
' "codecs.register_error()", see section Error Handlers. '
'For a list\n'
' of possible encodings, see section Standard Encodings.\n'
'\n'
' Changed in version 3.1: Support for keyword arguments '
'added.\n'
'\n'
'str.endswith(suffix[, start[, end]])\n'
'\n'
' Return "True" if the string ends with the specified '
'*suffix*,\n'
' otherwise return "False". *suffix* can also be a tuple '
'of suffixes\n'
' to look for. With optional *start*, test beginning at '
'that\n'
' position. With optional *end*, stop comparing at that '
'position.\n'
'\n'
'str.expandtabs(tabsize=8)\n'
'\n'
' Return a copy of the string where all tab characters '
'are replaced\n'
' by one or more spaces, depending on the current column '
'and the\n'
' given tab size. Tab positions occur every *tabsize* '
'characters\n'
' (default is 8, giving tab positions at columns 0, 8, 16 '
'and so on).\n'
' To expand the string, the current column is set to zero '
'and the\n'
' string is examined character by character. If the '
'character is a\n'
' tab ("\\t"), one or more space characters are inserted '
'in the result\n'
' until the current column is equal to the next tab '
'position. (The\n'
' tab character itself is not copied.) If the character '
'is a newline\n'
' ("\\n") or return ("\\r"), it is copied and the current '
'column is\n'
' reset to zero. Any other character is copied unchanged '
'and the\n'
' current column is incremented by one regardless of how '
'the\n'
' character is represented when printed.\n'
'\n'
" >>> '01\\t012\\t0123\\t01234'.expandtabs()\n"
" '01 012 0123 01234'\n"
" >>> '01\\t012\\t0123\\t01234'.expandtabs(4)\n"
" '01 012 0123 01234'\n"
'\n'
'str.find(sub[, start[, end]])\n'
'\n'
' Return the lowest index in the string where substring '
'*sub* is\n'
' found within the slice "s[start:end]". Optional '
'arguments *start*\n'
' and *end* are interpreted as in slice notation. Return '
'"-1" if\n'
' *sub* is not found.\n'
'\n'
' Note: The "find()" method should be used only if you '
'need to know\n'
' the position of *sub*. To check if *sub* is a '
'substring or not,\n'
' use the "in" operator:\n'
'\n'
" >>> 'Py' in 'Python'\n"
' True\n'
'\n'
'str.format(*args, **kwargs)\n'
'\n'
' Perform a string formatting operation. The string on '
'which this\n'
' method is called can contain literal text or '
'replacement fields\n'
' delimited by braces "{}". Each replacement field '
'contains either\n'
' the numeric index of a positional argument, or the name '
'of a\n'
' keyword argument. Returns a copy of the string where '
'each\n'
' replacement field is replaced with the string value of '
'the\n'
' corresponding argument.\n'
'\n'
' >>> "The sum of 1 + 2 is {0}".format(1+2)\n'
" 'The sum of 1 + 2 is 3'\n"
'\n'
' See Format String Syntax for a description of the '
'various\n'
' formatting options that can be specified in format '
'strings.\n'
'\n'
'   Note: When formatting a number ("int", "float", "complex" '
'and\n'
' subclasses) with the "n" type (ex: '
'"\'{:n}\'.format(1234)"), the\n'
' function sets temporarily the "LC_CTYPE" locale to '
'the\n'
' "LC_NUMERIC" locale to decode "decimal_point" and '
'"thousands_sep"\n'
' fields of "localeconv()" if they are non-ASCII or '
'longer than 1\n'
' byte, and the "LC_NUMERIC" locale is different than '
'the\n'
' "LC_CTYPE" locale. This temporary change affects '
'other threads.\n'
'\n'
' Changed in version 3.6.5: When formatting a number with '
'the "n"\n'
' type, the function sets temporarily the "LC_CTYPE" '
'locale to the\n'
' "LC_NUMERIC" locale in some cases.\n'
'\n'
'str.format_map(mapping)\n'
'\n'
' Similar to "str.format(**mapping)", except that '
'"mapping" is used\n'
' directly and not copied to a "dict". This is useful if '
'for example\n'
' "mapping" is a dict subclass:\n'
'\n'
' >>> class Default(dict):\n'
' ... def __missing__(self, key):\n'
' ... return key\n'
' ...\n'
" >>> '{name} was born in "
"{country}'.format_map(Default(name='Guido'))\n"
" 'Guido was born in country'\n"
'\n'
' New in version 3.2.\n'
'\n'
'str.index(sub[, start[, end]])\n'
'\n'
' Like "find()", but raise "ValueError" when the '
'substring is not\n'
' found.\n'
'\n'
'str.isalnum()\n'
'\n'
' Return true if all characters in the string are '
'alphanumeric and\n'
' there is at least one character, false otherwise. A '
'character "c"\n'
' is alphanumeric if one of the following returns '
'"True":\n'
' "c.isalpha()", "c.isdecimal()", "c.isdigit()", or '
'"c.isnumeric()".\n'
'\n'
'str.isalpha()\n'
'\n'
' Return true if all characters in the string are '
'alphabetic and\n'
' there is at least one character, false otherwise. '
'Alphabetic\n'
' characters are those characters defined in the Unicode '
'character\n'
' database as “Letter”, i.e., those with general category '
'property\n'
' being one of “Lm”, “Lt”, “Lu”, “Ll”, or “Lo”. Note '
'that this is\n'
' different from the “Alphabetic” property defined in the '
'Unicode\n'
' Standard.\n'
'\n'
'str.isdecimal()\n'
'\n'
' Return true if all characters in the string are decimal '
'characters\n'
' and there is at least one character, false otherwise. '
'Decimal\n'
' characters are those that can be used to form numbers '
'in base 10,\n'
' e.g. U+0660, ARABIC-INDIC DIGIT ZERO. Formally a '
'decimal character\n'
' is a character in the Unicode General Category “Nd”.\n'
'\n'
'str.isdigit()\n'
'\n'
' Return true if all characters in the string are digits '
'and there is\n'
' at least one character, false otherwise. Digits '
'include decimal\n'
' characters and digits that need special handling, such '
'as the\n'
' compatibility superscript digits. This covers digits '
'which cannot\n'
' be used to form numbers in base 10, like the Kharosthi '
'numbers.\n'
' Formally, a digit is a character that has the property '
'value\n'
' Numeric_Type=Digit or Numeric_Type=Decimal.\n'
'\n'
'str.isidentifier()\n'
'\n'
' Return true if the string is a valid identifier '
'according to the\n'
' language definition, section Identifiers and keywords.\n'
'\n'
' Use "keyword.iskeyword()" to test for reserved '
'identifiers such as\n'
' "def" and "class".\n'
'\n'
'str.islower()\n'
'\n'
' Return true if all cased characters [4] in the string '
'are lowercase\n'
' and there is at least one cased character, false '
'otherwise.\n'
'\n'
'str.isnumeric()\n'
'\n'
' Return true if all characters in the string are numeric '
'characters,\n'
' and there is at least one character, false otherwise. '
'Numeric\n'
' characters include digit characters, and all characters '
'that have\n'
' the Unicode numeric value property, e.g. U+2155, VULGAR '
'FRACTION\n'
' ONE FIFTH. Formally, numeric characters are those with '
'the\n'
' property value Numeric_Type=Digit, Numeric_Type=Decimal '
'or\n'
' Numeric_Type=Numeric.\n'
'\n'
'str.isprintable()\n'
'\n'
' Return true if all characters in the string are '
'printable or the\n'
' string is empty, false otherwise. Nonprintable '
'characters are\n'
' those characters defined in the Unicode character '
'database as\n'
' “Other” or “Separator”, excepting the ASCII space '
'(0x20) which is\n'
' considered printable. (Note that printable characters '
'in this\n'
' context are those which should not be escaped when '
'"repr()" is\n'
' invoked on a string. It has no bearing on the handling '
'of strings\n'
' written to "sys.stdout" or "sys.stderr".)\n'
'\n'
'str.isspace()\n'
'\n'
' Return true if there are only whitespace characters in '
'the string\n'
' and there is at least one character, false otherwise. '
'Whitespace\n'
' characters are those characters defined in the Unicode '
'character\n'
' database as “Other” or “Separator” and those with '
'bidirectional\n'
' property being one of “WS”, “B”, or “S”.\n'
'\n'
'str.istitle()\n'
'\n'
' Return true if the string is a titlecased string and '
'there is at\n'
' least one character, for example uppercase characters '
'may only\n'
' follow uncased characters and lowercase characters only '
'cased ones.\n'
' Return false otherwise.\n'
'\n'
'str.isupper()\n'
'\n'
' Return true if all cased characters [4] in the string '
'are uppercase\n'
' and there is at least one cased character, false '
'otherwise.\n'
'\n'
'str.join(iterable)\n'
'\n'
' Return a string which is the concatenation of the '
'strings in\n'
' *iterable*. A "TypeError" will be raised if there are '
'any non-\n'
' string values in *iterable*, including "bytes" '
'objects. The\n'
' separator between elements is the string providing this '
'method.\n'
'\n'
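'   For example:\n'
'\n'
"      >>> ', '.join(['red', 'green', 'blue'])\n"
"      'red, green, blue'\n"
'\n'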
'str.ljust(width[, fillchar])\n'
'\n'
' Return the string left justified in a string of length '
'*width*.\n'
' Padding is done using the specified *fillchar* (default '
'is an ASCII\n'
' space). The original string is returned if *width* is '
'less than or\n'
' equal to "len(s)".\n'
'\n'
'str.lower()\n'
'\n'
' Return a copy of the string with all the cased '
'characters [4]\n'
' converted to lowercase.\n'
'\n'
' The lowercasing algorithm used is described in section '
'3.13 of the\n'
' Unicode Standard.\n'
'\n'
'str.lstrip([chars])\n'
'\n'
' Return a copy of the string with leading characters '
'removed. The\n'
' *chars* argument is a string specifying the set of '
'characters to be\n'
' removed. If omitted or "None", the *chars* argument '
'defaults to\n'
' removing whitespace. The *chars* argument is not a '
'prefix; rather,\n'
' all combinations of its values are stripped:\n'
'\n'
" >>> ' spacious '.lstrip()\n"
" 'spacious '\n"
" >>> 'www.example.com'.lstrip('cmowz.')\n"
" 'example.com'\n"
'\n'
'static str.maketrans(x[, y[, z]])\n'
'\n'
' This static method returns a translation table usable '
'for\n'
' "str.translate()".\n'
'\n'
' If there is only one argument, it must be a dictionary '
'mapping\n'
' Unicode ordinals (integers) or characters (strings of '
'length 1) to\n'
' Unicode ordinals, strings (of arbitrary lengths) or '
'"None".\n'
' Character keys will then be converted to ordinals.\n'
'\n'
' If there are two arguments, they must be strings of '
'equal length,\n'
' and in the resulting dictionary, each character in x '
'will be mapped\n'
' to the character at the same position in y. If there '
'is a third\n'
' argument, it must be a string, whose characters will be '
'mapped to\n'
' "None" in the result.\n'
'\n'
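'   For example (an illustrative sketch):\n'
'\n'
"      >>> table = str.maketrans('abc', 'xyz')\n"
"      >>> 'abracadabra'.translate(table)\n"
"      'xyrxzxdxyrx'\n"
'\n'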
'str.partition(sep)\n'
'\n'
' Split the string at the first occurrence of *sep*, and '
'return a\n'
' 3-tuple containing the part before the separator, the '
'separator\n'
' itself, and the part after the separator. If the '
'separator is not\n'
' found, return a 3-tuple containing the string itself, '
'followed by\n'
' two empty strings.\n'
'\n'
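'   For example:\n'
'\n'
"      >>> 'key=value'.partition('=')\n"
"      ('key', '=', 'value')\n"
'\n'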
'str.replace(old, new[, count])\n'
'\n'
' Return a copy of the string with all occurrences of '
'substring *old*\n'
' replaced by *new*. If the optional argument *count* is '
'given, only\n'
' the first *count* occurrences are replaced.\n'
'\n'
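'   For example:\n'
'\n'
"      >>> 'one one one'.replace('one', 'two', 2)\n"
"      'two two one'\n"
'\n'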
'str.rfind(sub[, start[, end]])\n'
'\n'
' Return the highest index in the string where substring '
'*sub* is\n'
' found, such that *sub* is contained within '
'"s[start:end]".\n'
' Optional arguments *start* and *end* are interpreted as '
'in slice\n'
' notation. Return "-1" on failure.\n'
'\n'
'str.rindex(sub[, start[, end]])\n'
'\n'
' Like "rfind()" but raises "ValueError" when the '
'substring *sub* is\n'
' not found.\n'
'\n'
'str.rjust(width[, fillchar])\n'
'\n'
' Return the string right justified in a string of length '
'*width*.\n'
' Padding is done using the specified *fillchar* (default '
'is an ASCII\n'
' space). The original string is returned if *width* is '
'less than or\n'
' equal to "len(s)".\n'
'\n'
'str.rpartition(sep)\n'
'\n'
' Split the string at the last occurrence of *sep*, and '
'return a\n'
' 3-tuple containing the part before the separator, the '
'separator\n'
' itself, and the part after the separator. If the '
'separator is not\n'
' found, return a 3-tuple containing two empty strings, '
'followed by\n'
' the string itself.\n'
'\n'
'str.rsplit(sep=None, maxsplit=-1)\n'
'\n'
' Return a list of the words in the string, using *sep* '
'as the\n'
' delimiter string. If *maxsplit* is given, at most '
'*maxsplit* splits\n'
' are done, the *rightmost* ones. If *sep* is not '
'specified or\n'
' "None", any whitespace string is a separator. Except '
'for splitting\n'
' from the right, "rsplit()" behaves like "split()" which '
'is\n'
' described in detail below.\n'
'\n'
'str.rstrip([chars])\n'
'\n'
' Return a copy of the string with trailing characters '
'removed. The\n'
' *chars* argument is a string specifying the set of '
'characters to be\n'
' removed. If omitted or "None", the *chars* argument '
'defaults to\n'
' removing whitespace. The *chars* argument is not a '
'suffix; rather,\n'
' all combinations of its values are stripped:\n'
'\n'
" >>> ' spacious '.rstrip()\n"
" ' spacious'\n"
" >>> 'mississippi'.rstrip('ipz')\n"
" 'mississ'\n"
'\n'
'str.split(sep=None, maxsplit=-1)\n'
'\n'
' Return a list of the words in the string, using *sep* '
'as the\n'
' delimiter string. If *maxsplit* is given, at most '
'*maxsplit*\n'
' splits are done (thus, the list will have at most '
'"maxsplit+1"\n'
' elements). If *maxsplit* is not specified or "-1", '
'then there is\n'
' no limit on the number of splits (all possible splits '
'are made).\n'
'\n'
' If *sep* is given, consecutive delimiters are not '
'grouped together\n'
' and are deemed to delimit empty strings (for example,\n'
' "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', '
'\'2\']"). The *sep* argument\n'
' may consist of multiple characters (for example,\n'
' "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', '
'\'3\']"). Splitting an\n'
' empty string with a specified separator returns '
'"[\'\']".\n'
'\n'
' For example:\n'
'\n'
" >>> '1,2,3'.split(',')\n"
" ['1', '2', '3']\n"
" >>> '1,2,3'.split(',', maxsplit=1)\n"
" ['1', '2,3']\n"
" >>> '1,2,,3,'.split(',')\n"
" ['1', '2', '', '3', '']\n"
'\n'
' If *sep* is not specified or is "None", a different '
'splitting\n'
' algorithm is applied: runs of consecutive whitespace '
'are regarded\n'
' as a single separator, and the result will contain no '
'empty strings\n'
' at the start or end if the string has leading or '
'trailing\n'
' whitespace. Consequently, splitting an empty string or '
'a string\n'
' consisting of just whitespace with a "None" separator '
'returns "[]".\n'
'\n'
' For example:\n'
'\n'
" >>> '1 2 3'.split()\n"
" ['1', '2', '3']\n"
" >>> '1 2 3'.split(maxsplit=1)\n"
" ['1', '2 3']\n"
" >>> ' 1 2 3 '.split()\n"
" ['1', '2', '3']\n"
'\n'
'str.splitlines([keepends])\n'
'\n'
' Return a list of the lines in the string, breaking at '
'line\n'
' boundaries. Line breaks are not included in the '
'resulting list\n'
' unless *keepends* is given and true.\n'
'\n'
' This method splits on the following line boundaries. '
'In\n'
' particular, the boundaries are a superset of *universal '
'newlines*.\n'
'\n'
' '
'+-------------------------+-------------------------------+\n'
' | Representation | '
'Description |\n'
' '
'+=========================+===============================+\n'
' | "\\n" | Line '
'Feed |\n'
' '
'+-------------------------+-------------------------------+\n'
' | "\\r" | Carriage '
'Return |\n'
' '
'+-------------------------+-------------------------------+\n'
' | "\\r\\n" | Carriage Return + Line '
'Feed |\n'
' '
'+-------------------------+-------------------------------+\n'
' | "\\v" or "\\x0b" | Line '
'Tabulation |\n'
' '
'+-------------------------+-------------------------------+\n'
' | "\\f" or "\\x0c" | Form '
'Feed |\n'
' '
'+-------------------------+-------------------------------+\n'
' | "\\x1c" | File '
'Separator |\n'
' '
'+-------------------------+-------------------------------+\n'
' | "\\x1d" | Group '
'Separator |\n'
' '
'+-------------------------+-------------------------------+\n'
' | "\\x1e" | Record '
'Separator |\n'
' '
'+-------------------------+-------------------------------+\n'
' | "\\x85" | Next Line (C1 Control '
'Code) |\n'
' '
'+-------------------------+-------------------------------+\n'
' | "\\u2028" | Line '
'Separator |\n'
' '
'+-------------------------+-------------------------------+\n'
' | "\\u2029" | Paragraph '
'Separator |\n'
' '
'+-------------------------+-------------------------------+\n'
'\n'
' Changed in version 3.2: "\\v" and "\\f" added to list '
'of line\n'
' boundaries.\n'
'\n'
' For example:\n'
'\n'
" >>> 'ab c\\n\\nde fg\\rkl\\r\\n'.splitlines()\n"
" ['ab c', '', 'de fg', 'kl']\n"
" >>> 'ab c\\n\\nde "
"fg\\rkl\\r\\n'.splitlines(keepends=True)\n"
" ['ab c\\n', '\\n', 'de fg\\r', 'kl\\r\\n']\n"
'\n'
' Unlike "split()" when a delimiter string *sep* is '
'given, this\n'
' method returns an empty list for the empty string, and '
'a terminal\n'
' line break does not result in an extra line:\n'
'\n'
' >>> "".splitlines()\n'
' []\n'
' >>> "One line\\n".splitlines()\n'
" ['One line']\n"
'\n'
' For comparison, "split(\'\\n\')" gives:\n'
'\n'
" >>> ''.split('\\n')\n"
" ['']\n"
" >>> 'Two lines\\n'.split('\\n')\n"
" ['Two lines', '']\n"
'\n'
'str.startswith(prefix[, start[, end]])\n'
'\n'
' Return "True" if string starts with the *prefix*, '
'otherwise return\n'
' "False". *prefix* can also be a tuple of prefixes to '
'look for.\n'
' With optional *start*, test string beginning at that '
'position.\n'
' With optional *end*, stop comparing string at that '
'position.\n'
'\n'
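'   For example (including the tuple-of-prefixes form):\n'
'\n'
"   >>> 'Python'.startswith('Py')\n"
'   True\n'
"   >>> 'Python'.startswith(('Py', 'Ja'))\n"
'   True\n'
'\n'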
'str.strip([chars])\n'
'\n'
' Return a copy of the string with the leading and '
'trailing\n'
' characters removed. The *chars* argument is a string '
'specifying the\n'
' set of characters to be removed. If omitted or "None", '
'the *chars*\n'
' argument defaults to removing whitespace. The *chars* '
'argument is\n'
' not a prefix or suffix; rather, all combinations of its '
'values are\n'
' stripped:\n'
'\n'
" >>> ' spacious '.strip()\n"
" 'spacious'\n"
" >>> 'www.example.com'.strip('cmowz.')\n"
" 'example'\n"
'\n'
' The outermost leading and trailing *chars* argument '
'values are\n'
' stripped from the string. Characters are removed from '
'the leading\n'
' end until reaching a string character that is not '
'contained in the\n'
' set of characters in *chars*. A similar action takes '
'place on the\n'
' trailing end. For example:\n'
'\n'
" >>> comment_string = '#....... Section 3.2.1 Issue "
"#32 .......'\n"
" >>> comment_string.strip('.#! ')\n"
" 'Section 3.2.1 Issue #32'\n"
'\n'
'str.swapcase()\n'
'\n'
' Return a copy of the string with uppercase characters '
'converted to\n'
' lowercase and vice versa. Note that it is not '
'necessarily true that\n'
' "s.swapcase().swapcase() == s".\n'
'\n'
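'   For example:\n'
'\n'
"   >>> 'Hello World'.swapcase()\n"
"   'hELLO wORLD'\n"
'\n'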
'str.title()\n'
'\n'
' Return a titlecased version of the string where words '
'start with an\n'
' uppercase character and the remaining characters are '
'lowercase.\n'
'\n'
' For example:\n'
'\n'
" >>> 'Hello world'.title()\n"
" 'Hello World'\n"
'\n'
' The algorithm uses a simple language-independent '
'definition of a\n'
' word as groups of consecutive letters. The definition '
'works in\n'
' many contexts but it means that apostrophes in '
'contractions and\n'
' possessives form word boundaries, which may not be the '
'desired\n'
' result:\n'
'\n'
' >>> "they\'re bill\'s friends from the UK".title()\n'
' "They\'Re Bill\'S Friends From The Uk"\n'
'\n'
' A workaround for apostrophes can be constructed using '
'regular\n'
' expressions:\n'
'\n'
' >>> import re\n'
' >>> def titlecase(s):\n'
' ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n'
' ... lambda mo: '
'mo.group(0)[0].upper() +\n'
' ... '
'mo.group(0)[1:].lower(),\n'
' ... s)\n'
' ...\n'
' >>> titlecase("they\'re bill\'s friends.")\n'
' "They\'re Bill\'s Friends."\n'
'\n'
'str.translate(table)\n'
'\n'
' Return a copy of the string in which each character has '
'been mapped\n'
' through the given translation table. The table must be '
'an object\n'
' that implements indexing via "__getitem__()", typically '
'a *mapping*\n'
' or *sequence*. When indexed by a Unicode ordinal (an '
'integer), the\n'
' table object can do any of the following: return a '
'Unicode ordinal\n'
' or a string, to map the character to one or more other '
'characters;\n'
' return "None", to delete the character from the return '
'string; or\n'
' raise a "LookupError" exception, to map the character '
'to itself.\n'
'\n'
' You can use "str.maketrans()" to create a translation '
'map from\n'
' character-to-character mappings in different formats.\n'
'\n'
' See also the "codecs" module for a more flexible '
'approach to custom\n'
' character mappings.\n'
'\n'
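'   For example, with a table built by "str.maketrans()" (the third\n'
'   argument maps characters to "None", deleting them):\n'
'\n'
"   >>> table = str.maketrans('abc', 'xyz', '!')\n"
"   >>> 'abc!'.translate(table)\n"
"   'xyz'\n"
'\n'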
'str.upper()\n'
'\n'
' Return a copy of the string with all the cased '
'characters [4]\n'
' converted to uppercase. Note that '
'"str.upper().isupper()" might be\n'
' "False" if "s" contains uncased characters or if the '
'Unicode\n'
' category of the resulting character(s) is not “Lu” '
'(Letter,\n'
' uppercase), but e.g. “Lt” (Letter, titlecase).\n'
'\n'
' The uppercasing algorithm used is described in section '
'3.13 of the\n'
' Unicode Standard.\n'
'\n'
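'   For example (note the full Unicode case mapping):\n'
'\n'
"   >>> 'straße'.upper()\n"
"   'STRASSE'\n"
'\n'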
'str.zfill(width)\n'
'\n'
' Return a copy of the string left filled with ASCII '
'"\'0\'" digits to\n'
' make a string of length *width*. A leading sign prefix\n'
' ("\'+\'"/"\'-\'") is handled by inserting the padding '
'*after* the sign\n'
' character rather than before. The original string is '
'returned if\n'
' *width* is less than or equal to "len(s)".\n'
'\n'
' For example:\n'
'\n'
' >>> "42".zfill(5)\n'
" '00042'\n"
' >>> "-42".zfill(5)\n'
" '-0042'\n",
'strings': 'String and Bytes literals\n'
'*************************\n'
'\n'
'String literals are described by the following lexical '
'definitions:\n'
'\n'
' stringliteral ::= [stringprefix](shortstring | longstring)\n'
' stringprefix ::= "r" | "u" | "R" | "U" | "f" | "F"\n'
' | "fr" | "Fr" | "fR" | "FR" | "rf" | "rF" | '
'"Rf" | "RF"\n'
' shortstring ::= "\'" shortstringitem* "\'" | \'"\' '
'shortstringitem* \'"\'\n'
' longstring ::= "\'\'\'" longstringitem* "\'\'\'" | '
'\'"""\' longstringitem* \'"""\'\n'
' shortstringitem ::= shortstringchar | stringescapeseq\n'
' longstringitem ::= longstringchar | stringescapeseq\n'
' shortstringchar ::= <any source character except "\\" or '
'newline or the quote>\n'
' longstringchar ::= <any source character except "\\">\n'
' stringescapeseq ::= "\\" <any source character>\n'
'\n'
' bytesliteral ::= bytesprefix(shortbytes | longbytes)\n'
' bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | '
'"rb" | "rB" | "Rb" | "RB"\n'
' shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' '
'shortbytesitem* \'"\'\n'
' longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' '
'longbytesitem* \'"""\'\n'
' shortbytesitem ::= shortbyteschar | bytesescapeseq\n'
' longbytesitem ::= longbyteschar | bytesescapeseq\n'
' shortbyteschar ::= <any ASCII character except "\\" or newline '
'or the quote>\n'
' longbyteschar ::= <any ASCII character except "\\">\n'
' bytesescapeseq ::= "\\" <any ASCII character>\n'
'\n'
'One syntactic restriction not indicated by these productions is '
'that\n'
'whitespace is not allowed between the "stringprefix" or '
'"bytesprefix"\n'
'and the rest of the literal. The source character set is defined '
'by\n'
'the encoding declaration; it is UTF-8 if no encoding declaration '
'is\n'
'given in the source file; see section Encoding declarations.\n'
'\n'
'In plain English: Both types of literals can be enclosed in '
'matching\n'
'single quotes ("\'") or double quotes ("""). They can also be '
'enclosed\n'
'in matching groups of three single or double quotes (these are\n'
'generally referred to as *triple-quoted strings*). The '
'backslash\n'
'("\\") character is used to escape characters that otherwise have '
'a\n'
'special meaning, such as newline, backslash itself, or the quote\n'
'character.\n'
'\n'
'Bytes literals are always prefixed with "\'b\'" or "\'B\'"; they '
'produce\n'
'an instance of the "bytes" type instead of the "str" type. They '
'may\n'
'only contain ASCII characters; bytes with a numeric value of 128 '
'or\n'
'greater must be expressed with escapes.\n'
'\n'
'Both string and bytes literals may optionally be prefixed with a\n'
'letter "\'r\'" or "\'R\'"; such strings are called *raw strings* '
'and treat\n'
'backslashes as literal characters. As a result, in string '
'literals,\n'
'"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated '
'specially.\n'
'Given that Python 2.x’s raw unicode literals behave differently '
'than\n'
'Python 3.x’s, the "\'ur\'" syntax is not supported.\n'
'\n'
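'For example, backslashes are preserved in raw strings:\n'
'\n'
"   >>> len('\\n'), len(r'\\n')\n"
'   (1, 2)\n'
'\n'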
'New in version 3.3: The "\'rb\'" prefix of raw bytes literals has '
'been\n'
'added as a synonym of "\'br\'".\n'
'\n'
'New in version 3.3: Support for the unicode legacy literal\n'
'("u\'value\'") was reintroduced to simplify the maintenance of '
'dual\n'
'Python 2.x and 3.x codebases. See **PEP 414** for more '
'information.\n'
'\n'
'A string literal with "\'f\'" or "\'F\'" in its prefix is a '
'*formatted\n'
'string literal*; see Formatted string literals. The "\'f\'" may '
'be\n'
'combined with "\'r\'", but not with "\'b\'" or "\'u\'", therefore '
'raw\n'
'formatted strings are possible, but formatted bytes literals are '
'not.\n'
'\n'
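'For example:\n'
'\n'
"   >>> name = 'World'\n"
'   >>> f"Hello, {name}!"\n'
"   'Hello, World!'\n"
'\n'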
'In triple-quoted literals, unescaped newlines and quotes are '
'allowed\n'
'(and are retained), except that three unescaped quotes in a row\n'
'terminate the literal. (A “quote” is the character used to open '
'the\n'
'literal, i.e. either "\'" or """.)\n'
'\n'
'Unless an "\'r\'" or "\'R\'" prefix is present, escape sequences '
'in string\n'
'and bytes literals are interpreted according to rules similar to '
'those\n'
'used by Standard C. The recognized escape sequences are:\n'
'\n'
'+-------------------+-----------------------------------+---------+\n'
'| Escape Sequence | Meaning | Notes '
'|\n'
'+===================+===================================+=========+\n'
'| "\\newline" | Backslash and newline ignored '
'| |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\\\" | Backslash ("\\") '
'| |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\\'" | Single quote ("\'") '
'| |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\"" | Double quote (""") '
'| |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\a" | ASCII Bell (BEL) '
'| |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\b" | ASCII Backspace (BS) '
'| |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\f" | ASCII Formfeed (FF) '
'| |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\n" | ASCII Linefeed (LF) '
'| |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\r" | ASCII Carriage Return (CR) '
'| |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\t" | ASCII Horizontal Tab (TAB) '
'| |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\v" | ASCII Vertical Tab (VT) '
'| |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\ooo" | Character with octal value *ooo* | '
'(1,3) |\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\xhh" | Character with hex value *hh* | '
'(2,3) |\n'
'+-------------------+-----------------------------------+---------+\n'
'\n'
'Escape sequences only recognized in string literals are:\n'
'\n'
'+-------------------+-----------------------------------+---------+\n'
'| Escape Sequence | Meaning | Notes '
'|\n'
'+===================+===================================+=========+\n'
'| "\\N{name}" | Character named *name* in the | '
'(4) |\n'
'| | Unicode database | '
'|\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\uxxxx" | Character with 16-bit hex value | '
'(5) |\n'
'| | *xxxx* | '
'|\n'
'+-------------------+-----------------------------------+---------+\n'
'| "\\Uxxxxxxxx" | Character with 32-bit hex value | '
'(6) |\n'
'| | *xxxxxxxx* | '
'|\n'
'+-------------------+-----------------------------------+---------+\n'
'\n'
'Notes:\n'
'\n'
'1. As in Standard C, up to three octal digits are accepted.\n'
'\n'
'2. Unlike in Standard C, exactly two hex digits are required.\n'
'\n'
'3. In a bytes literal, hexadecimal and octal escapes denote the\n'
' byte with the given value. In a string literal, these escapes\n'
' denote a Unicode character with the given value.\n'
'\n'
'4. Changed in version 3.3: Support for name aliases [1] has been\n'
' added.\n'
'\n'
'5. Exactly four hex digits are required.\n'
'\n'
'6. Any Unicode character can be encoded this way. Exactly eight\n'
' hex digits are required.\n'
'\n'
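'For example, the following octal and hexadecimal escapes denote the\n'
'same character:\n'
'\n'
"   >>> '\\x41' == '\\101' == 'A'\n"
'   True\n'
'\n'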
'Unlike Standard C, all unrecognized escape sequences are left in '
'the\n'
'string unchanged, i.e., *the backslash is left in the result*. '
'(This\n'
'behavior is useful when debugging: if an escape sequence is '
'mistyped,\n'
'the resulting output is more easily recognized as broken.) It is '
'also\n'
'important to note that the escape sequences only recognized in '
'string\n'
'literals fall into the category of unrecognized escapes for '
'bytes\n'
'literals.\n'
'\n'
' Changed in version 3.6: Unrecognized escape sequences produce '
'a\n'
' DeprecationWarning. In some future version of Python they '
'will be\n'
' a SyntaxError.\n'
'\n'
'Even in a raw literal, quotes can be escaped with a backslash, '
'but the\n'
'backslash remains in the result; for example, "r"\\""" is a '
'valid\n'
'string literal consisting of two characters: a backslash and a '
'double\n'
'quote; "r"\\"" is not a valid string literal (even a raw string '
'cannot\n'
'end in an odd number of backslashes). Specifically, *a raw '
'literal\n'
'cannot end in a single backslash* (since the backslash would '
'escape\n'
'the following quote character). Note also that a single '
'backslash\n'
'followed by a newline is interpreted as those two characters as '
'part\n'
'of the literal, *not* as a line continuation.\n',
'subscriptions': 'Subscriptions\n'
'*************\n'
'\n'
'A subscription selects an item of a sequence (string, tuple '
'or list)\n'
'or mapping (dictionary) object:\n'
'\n'
' subscription ::= primary "[" expression_list "]"\n'
'\n'
'The primary must evaluate to an object that supports '
'subscription\n'
'(lists or dictionaries for example). User-defined objects '
'can support\n'
'subscription by defining a "__getitem__()" method.\n'
'\n'
'For built-in objects, there are two types of objects that '
'support\n'
'subscription:\n'
'\n'
'If the primary is a mapping, the expression list must '
'evaluate to an\n'
'object whose value is one of the keys of the mapping, and '
'the\n'
'subscription selects the value in the mapping that '
'corresponds to that\n'
'key. (The expression list is a tuple except if it has '
'exactly one\n'
'item.)\n'
'\n'
'If the primary is a sequence, the expression (list) must '
'evaluate to\n'
'an integer or a slice (as discussed in the following '
'section).\n'
'\n'
'The formal syntax makes no special provision for negative '
'indices in\n'
'sequences; however, built-in sequences all provide a '
'"__getitem__()"\n'
'method that interprets negative indices by adding the '
'length of the\n'
'sequence to the index (so that "x[-1]" selects the last '
'item of "x").\n'
'The resulting value must be a nonnegative integer less than '
'the number\n'
'of items in the sequence, and the subscription selects the '
'item whose\n'
'index is that value (counting from zero). Since the support '
'for\n'
'negative indices and slicing occurs in the object’s '
'"__getitem__()"\n'
'method, subclasses overriding this method will need to '
'explicitly add\n'
'that support.\n'
'\n'
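'For example:\n'
'\n'
"   >>> a = ['x', 'y', 'z']\n"
'   >>> a[1]\n'
"   'y'\n"
'   >>> a[-1]\n'
"   'z'\n"
'\n'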
'A string’s items are characters. A character is not a '
'separate data\n'
'type but a string of exactly one character.\n',
'truth': 'Truth Value Testing\n'
'*******************\n'
'\n'
'Any object can be tested for truth value, for use in an "if" or\n'
'"while" condition or as operand of the Boolean operations below.\n'
'\n'
'By default, an object is considered true unless its class defines\n'
'either a "__bool__()" method that returns "False" or a "__len__()"\n'
'method that returns zero, when called with the object. [1] Here '
'are\n'
'most of the built-in objects considered false:\n'
'\n'
'* constants defined to be false: "None" and "False".\n'
'\n'
'* zero of any numeric type: "0", "0.0", "0j", "Decimal(0)",\n'
' "Fraction(0, 1)"\n'
'\n'
'* empty sequences and collections: "\'\'", "()", "[]", "{}", '
'"set()",\n'
' "range(0)"\n'
'\n'
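'For example:\n'
'\n'
'   >>> bool(0), bool(0.0), bool(())\n'
'   (False, False, False)\n'
"   >>> bool(42), bool(' '), bool([0])\n"
'   (True, True, True)\n'
'\n'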
'Operations and built-in functions that have a Boolean result '
'always\n'
'return "0" or "False" for false and "1" or "True" for true, unless\n'
'otherwise stated. (Important exception: the Boolean operations '
'"or"\n'
'and "and" always return one of their operands.)\n',
'try': 'The "try" statement\n'
'*******************\n'
'\n'
'The "try" statement specifies exception handlers and/or cleanup code\n'
'for a group of statements:\n'
'\n'
' try_stmt ::= try1_stmt | try2_stmt\n'
' try1_stmt ::= "try" ":" suite\n'
' ("except" [expression ["as" identifier]] ":" '
'suite)+\n'
' ["else" ":" suite]\n'
' ["finally" ":" suite]\n'
' try2_stmt ::= "try" ":" suite\n'
' "finally" ":" suite\n'
'\n'
'The "except" clause(s) specify one or more exception handlers. When '
'no\n'
'exception occurs in the "try" clause, no exception handler is\n'
'executed. When an exception occurs in the "try" suite, a search for '
'an\n'
'exception handler is started. This search inspects the except '
'clauses\n'
'in turn until one is found that matches the exception. An '
'expression-\n'
'less except clause, if present, must be last; it matches any\n'
'exception. For an except clause with an expression, that expression\n'
'is evaluated, and the clause matches the exception if the resulting\n'
'object is “compatible” with the exception. An object is compatible\n'
'with an exception if it is the class or a base class of the '
'exception\n'
'object or a tuple containing an item compatible with the exception.\n'
'\n'
'If no except clause matches the exception, the search for an '
'exception\n'
'handler continues in the surrounding code and on the invocation '
'stack.\n'
'[1]\n'
'\n'
'If the evaluation of an expression in the header of an except clause\n'
'raises an exception, the original search for a handler is canceled '
'and\n'
'a search starts for the new exception in the surrounding code and on\n'
'the call stack (it is treated as if the entire "try" statement '
'raised\n'
'the exception).\n'
'\n'
'When a matching except clause is found, the exception is assigned to\n'
'the target specified after the "as" keyword in that except clause, '
'if\n'
'present, and the except clause’s suite is executed. All except\n'
'clauses must have an executable block. When the end of this block '
'is\n'
'reached, execution continues normally after the entire try '
'statement.\n'
'(This means that if two nested handlers exist for the same '
'exception,\n'
'and the exception occurs in the try clause of the inner handler, the\n'
'outer handler will not handle the exception.)\n'
'\n'
'When an exception has been assigned using "as target", it is cleared\n'
'at the end of the except clause. This is as if\n'
'\n'
' except E as N:\n'
' foo\n'
'\n'
'was translated to\n'
'\n'
' except E as N:\n'
' try:\n'
' foo\n'
' finally:\n'
' del N\n'
'\n'
'This means the exception must be assigned to a different name to be\n'
'able to refer to it after the except clause. Exceptions are cleared\n'
'because with the traceback attached to them, they form a reference\n'
'cycle with the stack frame, keeping all locals in that frame alive\n'
'until the next garbage collection occurs.\n'
'\n'
'Before an except clause’s suite is executed, details about the\n'
'exception are stored in the "sys" module and can be accessed via\n'
'"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of '
'the\n'
'exception class, the exception instance and a traceback object (see\n'
'section The standard type hierarchy) identifying the point in the\n'
'program where the exception occurred. "sys.exc_info()" values are\n'
'restored to their previous values (before the call) when returning\n'
'from a function that handled an exception.\n'
'\n'
'The optional "else" clause is executed if and when control flows off\n'
'the end of the "try" clause. [2] Exceptions in the "else" clause are\n'
'not handled by the preceding "except" clauses.\n'
'\n'
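'For example, the "else" suite runs only when the "try" suite raises\n'
'no exception:\n'
'\n'
'   >>> try:\n'
"   ...     x = int('3')\n"
'   ... except ValueError:\n'
"   ...     print('invalid literal')\n"
'   ... else:\n'
"   ...     print('parsed', x)\n"
'   ...\n'
'   parsed 3\n'
'\n'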
'If "finally" is present, it specifies a ‘cleanup’ handler. The '
'"try"\n'
'clause is executed, including any "except" and "else" clauses. If '
'an\n'
'exception occurs in any of the clauses and is not handled, the\n'
'exception is temporarily saved. The "finally" clause is executed. '
'If\n'
'there is a saved exception it is re-raised at the end of the '
'"finally"\n'
'clause. If the "finally" clause raises another exception, the saved\n'
'exception is set as the context of the new exception. If the '
'"finally"\n'
'clause executes a "return" or "break" statement, the saved exception\n'
'is discarded:\n'
'\n'
' >>> def f():\n'
' ... try:\n'
' ... 1/0\n'
' ... finally:\n'
' ... return 42\n'
' ...\n'
' >>> f()\n'
' 42\n'
'\n'
'The exception information is not available to the program during\n'
'execution of the "finally" clause.\n'
'\n'
'When a "return", "break" or "continue" statement is executed in the\n'
'"try" suite of a "try"…"finally" statement, the "finally" clause is\n'
'also executed ‘on the way out.’ A "continue" statement is illegal in\n'
'the "finally" clause. (The reason is a problem with the current\n'
'implementation — this restriction may be lifted in the future).\n'
'\n'
'The return value of a function is determined by the last "return"\n'
'statement executed. Since the "finally" clause always executes, a\n'
'"return" statement executed in the "finally" clause will always be '
'the\n'
'last one executed:\n'
'\n'
' >>> def foo():\n'
' ... try:\n'
" ... return 'try'\n"
' ... finally:\n'
" ... return 'finally'\n"
' ...\n'
' >>> foo()\n'
" 'finally'\n"
'\n'
'Additional information on exceptions can be found in section\n'
'Exceptions, and information on using the "raise" statement to '
'generate\n'
'exceptions may be found in section The raise statement.\n',
'types': 'The standard type hierarchy\n'
'***************************\n'
'\n'
'Below is a list of the types that are built into Python. '
'Extension\n'
'modules (written in C, Java, or other languages, depending on the\n'
'implementation) can define additional types. Future versions of\n'
'Python may add types to the type hierarchy (e.g., rational '
'numbers,\n'
'efficiently stored arrays of integers, etc.), although such '
'additions\n'
'will often be provided via the standard library instead.\n'
'\n'
'Some of the type descriptions below contain a paragraph listing\n'
'‘special attributes.’ These are attributes that provide access to '
'the\n'
'implementation and are not intended for general use. Their '
'definition\n'
'may change in the future.\n'
'\n'
'None\n'
' This type has a single value. There is a single object with '
'this\n'
' value. This object is accessed through the built-in name "None". '
'It\n'
' is used to signify the absence of a value in many situations, '
'e.g.,\n'
' it is returned from functions that don’t explicitly return\n'
' anything. Its truth value is false.\n'
'\n'
'NotImplemented\n'
' This type has a single value. There is a single object with '
'this\n'
' value. This object is accessed through the built-in name\n'
' "NotImplemented". Numeric methods and rich comparison methods\n'
' should return this value if they do not implement the operation '
'for\n'
' the operands provided. (The interpreter will then try the\n'
' reflected operation, or some other fallback, depending on the\n'
' operator.) Its truth value is true.\n'
'\n'
' See Implementing the arithmetic operations for more details.\n'
'\n'
'Ellipsis\n'
' This type has a single value. There is a single object with '
'this\n'
' value. This object is accessed through the literal "..." or the\n'
' built-in name "Ellipsis". Its truth value is true.\n'
'\n'
'"numbers.Number"\n'
' These are created by numeric literals and returned as results '
'by\n'
' arithmetic operators and arithmetic built-in functions. '
'Numeric\n'
' objects are immutable; once created their value never changes.\n'
' Python numbers are of course strongly related to mathematical\n'
' numbers, but subject to the limitations of numerical '
'representation\n'
' in computers.\n'
'\n'
' Python distinguishes between integers, floating point numbers, '
'and\n'
' complex numbers:\n'
'\n'
' "numbers.Integral"\n'
' These represent elements from the mathematical set of '
'integers\n'
' (positive and negative).\n'
'\n'
' There are two types of integers:\n'
'\n'
' Integers ("int")\n'
'\n'
' These represent numbers in an unlimited range, subject to\n'
' available (virtual) memory only. For the purpose of '
'shift\n'
' and mask operations, a binary representation is assumed, '
'and\n'
' negative numbers are represented in a variant of 2’s\n'
' complement which gives the illusion of an infinite string '
'of\n'
' sign bits extending to the left.\n'
'\n'
' Booleans ("bool")\n'
' These represent the truth values False and True. The two\n'
' objects representing the values "False" and "True" are '
'the\n'
' only Boolean objects. The Boolean type is a subtype of '
'the\n'
' integer type, and Boolean values behave like the values 0 '
'and\n'
' 1, respectively, in almost all contexts, the exception '
'being\n'
' that when converted to a string, the strings ""False"" or\n'
' ""True"" are returned, respectively.\n'
'\n'
' The rules for integer representation are intended to give '
'the\n'
' most meaningful interpretation of shift and mask operations\n'
' involving negative integers.\n'
'\n'
' "numbers.Real" ("float")\n'
' These represent machine-level double precision floating '
'point\n'
' numbers. You are at the mercy of the underlying machine\n'
' architecture (and C or Java implementation) for the accepted\n'
' range and handling of overflow. Python does not support '
'single-\n'
' precision floating point numbers; the savings in processor '
'and\n'
' memory usage that are usually the reason for using these are\n'
' dwarfed by the overhead of using objects in Python, so there '
'is\n'
' no reason to complicate the language with two kinds of '
'floating\n'
' point numbers.\n'
'\n'
' "numbers.Complex" ("complex")\n'
' These represent complex numbers as a pair of machine-level\n'
' double precision floating point numbers. The same caveats '
'apply\n'
' as for floating point numbers. The real and imaginary parts '
'of a\n'
' complex number "z" can be retrieved through the read-only\n'
' attributes "z.real" and "z.imag".\n'
'\n'
'Sequences\n'
' These represent finite ordered sets indexed by non-negative\n'
' numbers. The built-in function "len()" returns the number of '
'items\n'
' of a sequence. When the length of a sequence is *n*, the index '
'set\n'
' contains the numbers 0, 1, …, *n*-1. Item *i* of sequence *a* '
'is\n'
' selected by "a[i]".\n'
'\n'
' Sequences also support slicing: "a[i:j]" selects all items with\n'
' index *k* such that *i* "<=" *k* "<" *j*. When used as an\n'
' expression, a slice is a sequence of the same type. This '
'implies\n'
' that the index set is renumbered so that it starts at 0.\n'
'\n'
' Some sequences also support “extended slicing” with a third '
'“step”\n'
' parameter: "a[i:j:k]" selects all items of *a* with index *x* '
'where\n'
' "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n'
'\n'
' Sequences are distinguished according to their mutability:\n'
'\n'
' Immutable sequences\n'
' An object of an immutable sequence type cannot change once it '
'is\n'
' created. (If the object contains references to other '
'objects,\n'
' these other objects may be mutable and may be changed; '
'however,\n'
' the collection of objects directly referenced by an '
'immutable\n'
' object cannot change.)\n'
'\n'
' The following types are immutable sequences:\n'
'\n'
' Strings\n'
' A string is a sequence of values that represent Unicode '
'code\n'
' points. All the code points in the range "U+0000 - '
'U+10FFFF"\n'
' can be represented in a string. Python doesn’t have a '
'"char"\n'
' type; instead, every code point in the string is '
'represented\n'
' as a string object with length "1". The built-in '
'function\n'
' "ord()" converts a code point from its string form to an\n'
' integer in the range "0 - 10FFFF"; "chr()" converts an\n'
' integer in the range "0 - 10FFFF" to the corresponding '
'length\n'
' "1" string object. "str.encode()" can be used to convert '
'a\n'
' "str" to "bytes" using the given text encoding, and\n'
' "bytes.decode()" can be used to achieve the opposite.\n'
'\n'
' Tuples\n'
' The items of a tuple are arbitrary Python objects. Tuples '
'of\n'
' two or more items are formed by comma-separated lists of\n'
' expressions. A tuple of one item (a ‘singleton’) can be\n'
' formed by affixing a comma to an expression (an expression '
'by\n'
' itself does not create a tuple, since parentheses must be\n'
' usable for grouping of expressions). An empty tuple can '
'be\n'
' formed by an empty pair of parentheses.\n'
'\n'
' Bytes\n'
' A bytes object is an immutable array. The items are '
'8-bit\n'
' bytes, represented by integers in the range 0 <= x < 256.\n'
' Bytes literals (like "b\'abc\'") and the built-in '
'"bytes()"\n'
' constructor can be used to create bytes objects. Also, '
'bytes\n'
' objects can be decoded to strings via the "decode()" '
'method.\n'
'\n'
' Mutable sequences\n'
' Mutable sequences can be changed after they are created. '
'The\n'
' subscription and slicing notations can be used as the target '
'of\n'
' assignment and "del" (delete) statements.\n'
'\n'
' There are currently two intrinsic mutable sequence types:\n'
'\n'
' Lists\n'
' The items of a list are arbitrary Python objects. Lists '
'are\n'
' formed by placing a comma-separated list of expressions '
'in\n'
' square brackets. (Note that there are no special cases '
'needed\n'
' to form lists of length 0 or 1.)\n'
'\n'
' Byte Arrays\n'
' A bytearray object is a mutable array. They are created '
'by\n'
' the built-in "bytearray()" constructor. Aside from being\n'
' mutable (and hence unhashable), byte arrays otherwise '
'provide\n'
' the same interface and functionality as immutable "bytes"\n'
' objects.\n'
'\n'
' The extension module "array" provides an additional example '
'of a\n'
' mutable sequence type, as does the "collections" module.\n'
'\n'
'Set types\n'
' These represent unordered, finite sets of unique, immutable\n'
' objects. As such, they cannot be indexed by any subscript. '
'However,\n'
' they can be iterated over, and the built-in function "len()"\n'
' returns the number of items in a set. Common uses for sets are '
'fast\n'
' membership testing, removing duplicates from a sequence, and\n'
' computing mathematical operations such as intersection, union,\n'
' difference, and symmetric difference.\n'
'\n'
' For set elements, the same immutability rules apply as for\n'
' dictionary keys. Note that numeric types obey the normal rules '
'for\n'
' numeric comparison: if two numbers compare equal (e.g., "1" and\n'
' "1.0"), only one of them can be contained in a set.\n'
'\n'
' There are currently two intrinsic set types:\n'
'\n'
' Sets\n'
' These represent a mutable set. They are created by the '
'built-in\n'
' "set()" constructor and can be modified afterwards by '
'several\n'
' methods, such as "add()".\n'
'\n'
' Frozen sets\n'
' These represent an immutable set. They are created by the\n'
' built-in "frozenset()" constructor. As a frozenset is '
'immutable\n'
' and *hashable*, it can be used again as an element of '
'another\n'
' set, or as a dictionary key.\n'
'\n'
'Mappings\n'
' These represent finite sets of objects indexed by arbitrary '
'index\n'
' sets. The subscript notation "a[k]" selects the item indexed by '
'"k"\n'
' from the mapping "a"; this can be used in expressions and as '
'the\n'
' target of assignments or "del" statements. The built-in '
'function\n'
' "len()" returns the number of items in a mapping.\n'
'\n'
' There is currently a single intrinsic mapping type:\n'
'\n'
' Dictionaries\n'
' These represent finite sets of objects indexed by nearly\n'
' arbitrary values. The only types of values not acceptable '
'as\n'
' keys are values containing lists or dictionaries or other\n'
' mutable types that are compared by value rather than by '
'object\n'
' identity, the reason being that the efficient implementation '
'of\n'
' dictionaries requires a key’s hash value to remain constant.\n'
' Numeric types used for keys obey the normal rules for '
'numeric\n'
' comparison: if two numbers compare equal (e.g., "1" and '
'"1.0")\n'
' then they can be used interchangeably to index the same\n'
' dictionary entry.\n'
'\n'
' Dictionaries are mutable; they can be created by the "{...}"\n'
' notation (see section Dictionary displays).\n'
'\n'
' The extension modules "dbm.ndbm" and "dbm.gnu" provide\n'
' additional examples of mapping types, as does the '
'"collections"\n'
' module.\n'
'\n'
'Callable types\n'
' These are the types to which the function call operation (see\n'
' section Calls) can be applied:\n'
'\n'
' User-defined functions\n'
' A user-defined function object is created by a function\n'
' definition (see section Function definitions). It should be\n'
' called with an argument list containing the same number of '
'items\n'
' as the function’s formal parameter list.\n'
'\n'
' Special attributes:\n'
'\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | Attribute | Meaning '
'| |\n'
' '
'+===========================+=================================+=============+\n'
' | "__doc__" | The function’s documentation '
'| Writable |\n'
' | | string, or "None" if '
'| |\n'
' | | unavailable; not inherited by '
'| |\n'
' | | subclasses '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | "__name__" | The function’s name '
'| Writable |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | "__qualname__" | The function’s *qualified name* '
'| Writable |\n'
' | | New in version 3.3. '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | "__module__" | The name of the module the '
'| Writable |\n'
' | | function was defined in, or '
'| |\n'
' | | "None" if unavailable. '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | "__defaults__" | A tuple containing default '
'| Writable |\n'
' | | argument values for those '
'| |\n'
' | | arguments that have defaults, '
'| |\n'
' | | or "None" if no arguments have '
'| |\n'
' | | a default value '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | "__code__" | The code object representing '
'| Writable |\n'
' | | the compiled function body. '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | "__globals__" | A reference to the dictionary '
'| Read-only |\n'
' | | that holds the function’s '
'| |\n'
' | | global variables — the global '
'| |\n'
' | | namespace of the module in '
'| |\n'
' | | which the function was defined. '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | "__dict__" | The namespace supporting '
'| Writable |\n'
' | | arbitrary function attributes. '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | "__closure__" | "None" or a tuple of cells that '
'| Read-only |\n'
' | | contain bindings for the '
'| |\n'
' | | function’s free variables. '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | "__annotations__" | A dict containing annotations '
'| Writable |\n'
' | | of parameters. The keys of the '
'| |\n'
' | | dict are the parameter names, '
'| |\n'
' | | and "\'return\'" for the '
'return | |\n'
' | | annotation, if provided. '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | "__kwdefaults__" | A dict containing defaults for '
'| Writable |\n'
' | | keyword-only parameters. '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
'\n'
' Most of the attributes labelled “Writable” check the type of '
'the\n'
' assigned value.\n'
'\n'
' Function objects also support getting and setting arbitrary\n'
' attributes, which can be used, for example, to attach '
'metadata\n'
' to functions. Regular attribute dot-notation is used to get '
'and\n'
' set such attributes. *Note that the current implementation '
'only\n'
' supports function attributes on user-defined functions. '
'Function\n'
' attributes on built-in functions may be supported in the\n'
' future.*\n'
'\n'
' Additional information about a function’s definition can be\n'
' retrieved from its code object; see the description of '
'internal\n'
' types below.\n'
'\n'
' Instance methods\n'
' An instance method object combines a class, a class instance '
'and\n'
' any callable object (normally a user-defined function).\n'
'\n'
' Special read-only attributes: "__self__" is the class '
'instance\n'
' object, "__func__" is the function object; "__doc__" is the\n'
' method’s documentation (same as "__func__.__doc__"); '
'"__name__"\n'
' is the method name (same as "__func__.__name__"); '
'"__module__"\n'
' is the name of the module the method was defined in, or '
'"None"\n'
' if unavailable.\n'
'\n'
' Methods also support accessing (but not setting) the '
'arbitrary\n'
' function attributes on the underlying function object.\n'
'\n'
' User-defined method objects may be created when getting an\n'
' attribute of a class (perhaps via an instance of that class), '
'if\n'
' that attribute is a user-defined function object or a class\n'
' method object.\n'
'\n'
' When an instance method object is created by retrieving a '
'user-\n'
' defined function object from a class via one of its '
'instances,\n'
' its "__self__" attribute is the instance, and the method '
'object\n'
' is said to be bound. The new method’s "__func__" attribute '
'is\n'
' the original function object.\n'
'\n'
' When a user-defined method object is created by retrieving\n'
' another method object from a class or instance, the behaviour '
'is\n'
' the same as for a function object, except that the '
'"__func__"\n'
' attribute of the new instance is not the original method '
'object\n'
' but its "__func__" attribute.\n'
'\n'
' When an instance method object is created by retrieving a '
'class\n'
' method object from a class or instance, its "__self__" '
'attribute\n'
' is the class itself, and its "__func__" attribute is the\n'
' function object underlying the class method.\n'
'\n'
' When an instance method object is called, the underlying\n'
' function ("__func__") is called, inserting the class '
'instance\n'
' ("__self__") in front of the argument list. For instance, '
'when\n'
' "C" is a class which contains a definition for a function '
'"f()",\n'
' and "x" is an instance of "C", calling "x.f(1)" is equivalent '
'to\n'
' calling "C.f(x, 1)".\n'
'\n'
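'      For example (a minimal illustrative class):\n'
'\n'
'         >>> class C:\n'
'         ...     def f(self, arg):\n'
'         ...         return arg + 1\n'
'         ...\n'
'         >>> x = C()\n'
'         >>> x.f(1) == C.f(x, 1)\n'
'         True\n'
'\n'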
' When an instance method object is derived from a class '
'method\n'
' object, the “class instance” stored in "__self__" will '
'actually\n'
' be the class itself, so that calling either "x.f(1)" or '
'"C.f(1)"\n'
' is equivalent to calling "f(C,1)" where "f" is the '
'underlying\n'
' function.\n'
'\n'
' Note that the transformation from function object to '
'instance\n'
' method object happens each time the attribute is retrieved '
'from\n'
' the instance. In some cases, a fruitful optimization is to\n'
' assign the attribute to a local variable and call that local\n'
' variable. Also notice that this transformation only happens '
'for\n'
' user-defined functions; other callable objects (and all non-\n'
' callable objects) are retrieved without transformation. It '
'is\n'
' also important to note that user-defined functions which are\n'
' attributes of a class instance are not converted to bound\n'
' methods; this *only* happens when the function is an '
'attribute\n'
' of the class.\n'
'\n'
' Generator functions\n'
' A function or method which uses the "yield" statement (see\n'
' section The yield statement) is called a *generator '
'function*.\n'
' Such a function, when called, always returns an iterator '
'object\n'
' which can be used to execute the body of the function: '
'calling\n'
' the iterator’s "iterator.__next__()" method will cause the\n'
' function to execute until it provides a value using the '
'"yield"\n'
' statement. When the function executes a "return" statement '
'or\n'
' falls off the end, a "StopIteration" exception is raised and '
'the\n'
' iterator will have reached the end of the set of values to '
'be\n'
' returned.\n'
'\n'
' Coroutine functions\n'
' A function or method which is defined using "async def" is\n'
' called a *coroutine function*. Such a function, when '
'called,\n'
' returns a *coroutine* object. It may contain "await"\n'
' expressions, as well as "async with" and "async for" '
'statements.\n'
' See also the Coroutine Objects section.\n'
'\n'
' Asynchronous generator functions\n'
' A function or method which is defined using "async def" and\n'
'      which uses the "yield" statement is called an *asynchronous\n'
' generator function*. Such a function, when called, returns '
'an\n'
' asynchronous iterator object which can be used in an "async '
'for"\n'
' statement to execute the body of the function.\n'
'\n'
' Calling the asynchronous iterator’s "aiterator.__anext__()"\n'
' method will return an *awaitable* which when awaited will\n'
' execute until it provides a value using the "yield" '
'expression.\n'
' When the function executes an empty "return" statement or '
'falls\n'
' off the end, a "StopAsyncIteration" exception is raised and '
'the\n'
' asynchronous iterator will have reached the end of the set '
'of\n'
' values to be yielded.\n'
'\n'
' Built-in functions\n'
' A built-in function object is a wrapper around a C function.\n'
' Examples of built-in functions are "len()" and "math.sin()"\n'
' ("math" is a standard built-in module). The number and type '
'of\n'
' the arguments are determined by the C function. Special '
'read-\n'
' only attributes: "__doc__" is the function’s documentation\n'
' string, or "None" if unavailable; "__name__" is the '
'function’s\n'
' name; "__self__" is set to "None" (but see the next item);\n'
' "__module__" is the name of the module the function was '
'defined\n'
' in or "None" if unavailable.\n'
'\n'
' Built-in methods\n'
' This is really a different disguise of a built-in function, '
'this\n'
' time containing an object passed to the C function as an\n'
' implicit extra argument. An example of a built-in method is\n'
' "alist.append()", assuming *alist* is a list object. In this\n'
' case, the special read-only attribute "__self__" is set to '
'the\n'
' object denoted by *alist*.\n'
'\n'
' Classes\n'
' Classes are callable. These objects normally act as '
'factories\n'
' for new instances of themselves, but variations are possible '
'for\n'
' class types that override "__new__()". The arguments of the\n'
' call are passed to "__new__()" and, in the typical case, to\n'
' "__init__()" to initialize the new instance.\n'
'\n'
' Class Instances\n'
' Instances of arbitrary classes can be made callable by '
'defining\n'
' a "__call__()" method in their class.\n'
'\n'
'Modules\n'
' Modules are a basic organizational unit of Python code, and are\n'
' created by the import system as invoked either by the "import"\n'
' statement (see "import"), or by calling functions such as\n'
' "importlib.import_module()" and built-in "__import__()". A '
'module\n'
' object has a namespace implemented by a dictionary object (this '
'is\n'
' the dictionary referenced by the "__globals__" attribute of\n'
' functions defined in the module). Attribute references are\n'
' translated to lookups in this dictionary, e.g., "m.x" is '
'equivalent\n'
' to "m.__dict__["x"]". A module object does not contain the code\n'
' object used to initialize the module (since it isn’t needed '
'once\n'
' the initialization is done).\n'
'\n'
' Attribute assignment updates the module’s namespace dictionary,\n'
' e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n'
'\n'
' Predefined (writable) attributes: "__name__" is the module’s '
'name;\n'
' "__doc__" is the module’s documentation string, or "None" if\n'
' unavailable; "__annotations__" (optional) is a dictionary\n'
' containing *variable annotations* collected during module body\n'
' execution; "__file__" is the pathname of the file from which '
'the\n'
' module was loaded, if it was loaded from a file. The "__file__"\n'
' attribute may be missing for certain types of modules, such as '
'C\n'
' modules that are statically linked into the interpreter; for\n'
' extension modules loaded dynamically from a shared library, it '
'is\n'
' the pathname of the shared library file.\n'
'\n'
' Special read-only attribute: "__dict__" is the module’s '
'namespace\n'
' as a dictionary object.\n'
'\n'
' **CPython implementation detail:** Because of the way CPython\n'
' clears module dictionaries, the module dictionary will be '
'cleared\n'
' when the module falls out of scope even if the dictionary still '
'has\n'
' live references. To avoid this, copy the dictionary or keep '
'the\n'
' module around while using its dictionary directly.\n'
'\n'
'Custom classes\n'
' Custom class types are typically created by class definitions '
'(see\n'
' section Class definitions). A class has a namespace implemented '
'by\n'
' a dictionary object. Class attribute references are translated '
'to\n'
' lookups in this dictionary, e.g., "C.x" is translated to\n'
' "C.__dict__["x"]" (although there are a number of hooks which '
'allow\n'
' for other means of locating attributes). When the attribute name '
'is\n'
' not found there, the attribute search continues in the base\n'
' classes. This search of the base classes uses the C3 method\n'
' resolution order which behaves correctly even in the presence '
'of\n'
' ‘diamond’ inheritance structures where there are multiple\n'
' inheritance paths leading back to a common ancestor. Additional\n'
' details on the C3 MRO used by Python can be found in the\n'
' documentation accompanying the 2.3 release at\n'
' https://www.python.org/download/releases/2.3/mro/.\n'
'\n'
' When a class attribute reference (for class "C", say) would '
'yield a\n'
' class method object, it is transformed into an instance method\n'
' object whose "__self__" attribute is "C". When it would yield '
'a\n'
' static method object, it is transformed into the object wrapped '
'by\n'
' the static method object. See section Implementing Descriptors '
'for\n'
' another way in which attributes retrieved from a class may '
'differ\n'
' from those actually contained in its "__dict__".\n'
'\n'
' Class attribute assignments update the class’s dictionary, '
'never\n'
' the dictionary of a base class.\n'
'\n'
' A class object can be called (see above) to yield a class '
'instance\n'
' (see below).\n'
'\n'
' Special attributes: "__name__" is the class name; "__module__" '
'is\n'
' the module name in which the class was defined; "__dict__" is '
'the\n'
' dictionary containing the class’s namespace; "__bases__" is a '
'tuple\n'
' containing the base classes, in the order of their occurrence '
'in\n'
' the base class list; "__doc__" is the class’s documentation '
'string,\n'
' or "None" if undefined; "__annotations__" (optional) is a\n'
' dictionary containing *variable annotations* collected during '
'class\n'
' body execution.\n'
'\n'
'Class instances\n'
' A class instance is created by calling a class object (see '
'above).\n'
' A class instance has a namespace implemented as a dictionary '
'which\n'
' is the first place in which attribute references are searched.\n'
' When an attribute is not found there, and the instance’s class '
'has\n'
' an attribute by that name, the search continues with the class\n'
' attributes. If a class attribute is found that is a '
'user-defined\n'
' function object, it is transformed into an instance method '
'object\n'
' whose "__self__" attribute is the instance. Static method and\n'
' class method objects are also transformed; see above under\n'
' “Classes”. See section Implementing Descriptors for another way '
'in\n'
' which attributes of a class retrieved via its instances may '
'differ\n'
' from the objects actually stored in the class’s "__dict__". If '
'no\n'
' class attribute is found, and the object’s class has a\n'
' "__getattr__()" method, that is called to satisfy the lookup.\n'
'\n'
' Attribute assignments and deletions update the instance’s\n'
' dictionary, never a class’s dictionary. If the class has a\n'
' "__setattr__()" or "__delattr__()" method, this is called '
'instead\n'
' of updating the instance dictionary directly.\n'
'\n'
' Class instances can pretend to be numbers, sequences, or '
'mappings\n'
' if they have methods with certain special names. See section\n'
' Special method names.\n'
'\n'
' Special attributes: "__dict__" is the attribute dictionary;\n'
' "__class__" is the instance’s class.\n'
'\n'
'I/O objects (also known as file objects)\n'
' A *file object* represents an open file. Various shortcuts are\n'
' available to create file objects: the "open()" built-in '
'function,\n'
' and also "os.popen()", "os.fdopen()", and the "makefile()" '
'method\n'
' of socket objects (and perhaps by other functions or methods\n'
' provided by extension modules).\n'
'\n'
' The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n'
' initialized to file objects corresponding to the interpreter’s\n'
' standard input, output and error streams; they are all open in '
'text\n'
' mode and therefore follow the interface defined by the\n'
' "io.TextIOBase" abstract class.\n'
'\n'
'Internal types\n'
' A few types used internally by the interpreter are exposed to '
'the\n'
' user. Their definitions may change with future versions of the\n'
' interpreter, but they are mentioned here for completeness.\n'
'\n'
' Code objects\n'
' Code objects represent *byte-compiled* executable Python '
'code,\n'
' or *bytecode*. The difference between a code object and a\n'
' function object is that the function object contains an '
'explicit\n'
' reference to the function’s globals (the module in which it '
'was\n'
' defined), while a code object contains no context; also the\n'
' default argument values are stored in the function object, '
'not\n'
' in the code object (because they represent values calculated '
'at\n'
' run-time). Unlike function objects, code objects are '
'immutable\n'
' and contain no references (directly or indirectly) to '
'mutable\n'
' objects.\n'
'\n'
' Special read-only attributes: "co_name" gives the function '
'name;\n'
' "co_argcount" is the number of positional arguments '
'(including\n'
' arguments with default values); "co_nlocals" is the number '
'of\n'
' local variables used by the function (including arguments);\n'
' "co_varnames" is a tuple containing the names of the local\n'
' variables (starting with the argument names); "co_cellvars" '
'is a\n'
' tuple containing the names of local variables that are\n'
' referenced by nested functions; "co_freevars" is a tuple\n'
' containing the names of free variables; "co_code" is a '
'string\n'
' representing the sequence of bytecode instructions; '
'"co_consts"\n'
' is a tuple containing the literals used by the bytecode;\n'
' "co_names" is a tuple containing the names used by the '
'bytecode;\n'
' "co_filename" is the filename from which the code was '
'compiled;\n'
' "co_firstlineno" is the first line number of the function;\n'
' "co_lnotab" is a string encoding the mapping from bytecode\n'
' offsets to line numbers (for details see the source code of '
'the\n'
' interpreter); "co_stacksize" is the required stack size\n'
' (including local variables); "co_flags" is an integer '
'encoding a\n'
' number of flags for the interpreter.\n'
'\n'
' The following flag bits are defined for "co_flags": bit '
'"0x04"\n'
' is set if the function uses the "*arguments" syntax to accept '
'an\n'
' arbitrary number of positional arguments; bit "0x08" is set '
'if\n'
' the function uses the "**keywords" syntax to accept '
'arbitrary\n'
' keyword arguments; bit "0x20" is set if the function is a\n'
' generator.\n'
'\n'
' Future feature declarations ("from __future__ import '
'division")\n'
' also use bits in "co_flags" to indicate whether a code '
'object\n'
' was compiled with a particular feature enabled: bit "0x2000" '
'is\n'
' set if the function was compiled with future division '
'enabled;\n'
' bits "0x10" and "0x1000" were used in earlier versions of\n'
' Python.\n'
'\n'
' Other bits in "co_flags" are reserved for internal use.\n'
'\n'
' If a code object represents a function, the first item in\n'
' "co_consts" is the documentation string of the function, or\n'
' "None" if undefined.\n'
'\n'
' Frame objects\n'
' Frame objects represent execution frames. They may occur in\n'
' traceback objects (see below).\n'
'\n'
' Special read-only attributes: "f_back" is to the previous '
'stack\n'
' frame (towards the caller), or "None" if this is the bottom\n'
' stack frame; "f_code" is the code object being executed in '
'this\n'
' frame; "f_locals" is the dictionary used to look up local\n'
' variables; "f_globals" is used for global variables;\n'
' "f_builtins" is used for built-in (intrinsic) names; '
'"f_lasti"\n'
' gives the precise instruction (this is an index into the\n'
' bytecode string of the code object).\n'
'\n'
' Special writable attributes: "f_trace", if not "None", is a\n'
' function called at the start of each source code line (this '
'is\n'
' used by the debugger); "f_lineno" is the current line number '
'of\n'
' the frame — writing to this from within a trace function '
'jumps\n'
' to the given line (only for the bottom-most frame). A '
'debugger\n'
' can implement a Jump command (aka Set Next Statement) by '
'writing\n'
' to f_lineno.\n'
'\n'
' Frame objects support one method:\n'
'\n'
' frame.clear()\n'
'\n'
' This method clears all references to local variables held '
'by\n'
' the frame. Also, if the frame belonged to a generator, '
'the\n'
' generator is finalized. This helps break reference '
'cycles\n'
' involving frame objects (for example when catching an\n'
' exception and storing its traceback for later use).\n'
'\n'
' "RuntimeError" is raised if the frame is currently '
'executing.\n'
'\n'
' New in version 3.4.\n'
'\n'
' Traceback objects\n'
' Traceback objects represent a stack trace of an exception. '
'A\n'
' traceback object is created when an exception occurs. When '
'the\n'
' search for an exception handler unwinds the execution stack, '
'at\n'
' each unwound level a traceback object is inserted in front '
'of\n'
' the current traceback. When an exception handler is '
'entered,\n'
' the stack trace is made available to the program. (See '
'section\n'
' The try statement.) It is accessible as the third item of '
'the\n'
' tuple returned by "sys.exc_info()". When the program contains '
'no\n'
' suitable handler, the stack trace is written (nicely '
'formatted)\n'
' to the standard error stream; if the interpreter is '
'interactive,\n'
' it is also made available to the user as '
'"sys.last_traceback".\n'
'\n'
' Special read-only attributes: "tb_next" is the next level in '
'the\n'
' stack trace (towards the frame where the exception occurred), '
'or\n'
' "None" if there is no next level; "tb_frame" points to the\n'
' execution frame of the current level; "tb_lineno" gives the '
'line\n'
' number where the exception occurred; "tb_lasti" indicates '
'the\n'
' precise instruction. The line number and last instruction '
'in\n'
' the traceback may differ from the line number of its frame\n'
' object if the exception occurred in a "try" statement with '
'no\n'
' matching except clause or with a finally clause.\n'
'\n'
' Slice objects\n'
' Slice objects are used to represent slices for '
'"__getitem__()"\n'
' methods. They are also created by the built-in "slice()"\n'
' function.\n'
'\n'
' Special read-only attributes: "start" is the lower bound; '
'"stop"\n'
' is the upper bound; "step" is the step value; each is "None" '
'if\n'
' omitted. These attributes can have any type.\n'
'\n'
' Slice objects support one method:\n'
'\n'
' slice.indices(self, length)\n'
'\n'
' This method takes a single integer argument *length* and\n'
' computes information about the slice that the slice '
'object\n'
' would describe if applied to a sequence of *length* '
'items.\n'
' It returns a tuple of three integers; respectively these '
'are\n'
' the *start* and *stop* indices and the *step* or stride\n'
' length of the slice. Missing or out-of-bounds indices are\n'
' handled in a manner consistent with regular slices.\n'
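         '\n'
         '         For example:\n'
         '\n'
         '            >>> slice(None, None, 2).indices(10)\n'
         '            (0, 10, 2)\n'
         '            >>> slice(-3, None).indices(10)\n'
         '            (7, 10, 1)\n'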
'\n'
' Static method objects\n'
' Static method objects provide a way of defeating the\n'
' transformation of function objects to method objects '
'described\n'
' above. A static method object is a wrapper around any other\n'
' object, usually a user-defined method object. When a static\n'
' method object is retrieved from a class or a class instance, '
'the\n'
' object actually returned is the wrapped object, which is not\n'
' subject to any further transformation. Static method objects '
'are\n'
' not themselves callable, although the objects they wrap '
'usually\n'
' are. Static method objects are created by the built-in\n'
' "staticmethod()" constructor.\n'
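         '\n'
         '      A short illustrative example:\n'
         '\n'
         '         >>> class C:\n'
         '         ...     @staticmethod\n'
         '         ...     def f(x):\n'
         '         ...         return x + 1\n'
         '         >>> C.f(1)\n'
         '         2\n'
         '         >>> C().f(1)\n'
         '         2\n'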
'\n'
' Class method objects\n'
' A class method object, like a static method object, is a '
'wrapper\n'
' around another object that alters the way in which that '
'object\n'
' is retrieved from classes and class instances. The behaviour '
'of\n'
' class method objects upon such retrieval is described above,\n'
' under “User-defined methods”. Class method objects are '
'created\n'
' by the built-in "classmethod()" constructor.\n',
'typesfunctions': 'Functions\n'
'*********\n'
'\n'
'Function objects are created by function definitions. The '
'only\n'
'operation on a function object is to call it: '
'"func(argument-list)".\n'
'\n'
'There are really two flavors of function objects: built-in '
'functions\n'
'and user-defined functions. Both support the same '
'operation (to call\n'
'the function), but the implementation is different, hence '
'the\n'
'different object types.\n'
'\n'
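                  'For example:\n'
                  '\n'
                  '   >>> def square(x):\n'
                  '   ...     return x * x\n'
                  '   >>> square(3)\n'
                  '   9\n'
                  '\n'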
'See Function definitions for more information.\n',
'typesmapping': 'Mapping Types — "dict"\n'
'**********************\n'
'\n'
'A *mapping* object maps *hashable* values to arbitrary '
'objects.\n'
'Mappings are mutable objects. There is currently only one '
'standard\n'
'mapping type, the *dictionary*. (For other containers see '
'the built-\n'
'in "list", "set", and "tuple" classes, and the "collections" '
'module.)\n'
'\n'
'A dictionary’s keys are *almost* arbitrary values. Values '
'that are\n'
'not *hashable*, that is, values containing lists, '
'dictionaries or\n'
'other mutable types (that are compared by value rather than '
'by object\n'
'identity) may not be used as keys. Numeric types used for '
'keys obey\n'
'the normal rules for numeric comparison: if two numbers '
'compare equal\n'
'(such as "1" and "1.0") then they can be used '
'interchangeably to index\n'
'the same dictionary entry. (Note however, that since '
'computers store\n'
'floating-point numbers as approximations it is usually '
'unwise to use\n'
'them as dictionary keys.)\n'
'\n'
'Dictionaries can be created by placing a comma-separated '
'list of "key:\n'
'value" pairs within braces, for example: "{\'jack\': 4098, '
"'sjoerd':\n"
'4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the '
'"dict"\n'
'constructor.\n'
'\n'
'class dict(**kwarg)\n'
'class dict(mapping, **kwarg)\n'
'class dict(iterable, **kwarg)\n'
'\n'
' Return a new dictionary initialized from an optional '
'positional\n'
' argument and a possibly empty set of keyword arguments.\n'
'\n'
' If no positional argument is given, an empty dictionary '
'is created.\n'
' If a positional argument is given and it is a mapping '
'object, a\n'
' dictionary is created with the same key-value pairs as '
'the mapping\n'
' object. Otherwise, the positional argument must be an '
'*iterable*\n'
' object. Each item in the iterable must itself be an '
'iterable with\n'
' exactly two objects. The first object of each item '
'becomes a key\n'
' in the new dictionary, and the second object the '
'corresponding\n'
' value. If a key occurs more than once, the last value '
'for that key\n'
' becomes the corresponding value in the new dictionary.\n'
'\n'
' If keyword arguments are given, the keyword arguments and '
'their\n'
' values are added to the dictionary created from the '
'positional\n'
' argument. If a key being added is already present, the '
'value from\n'
' the keyword argument replaces the value from the '
'positional\n'
' argument.\n'
'\n'
' To illustrate, the following examples all return a '
'dictionary equal\n'
' to "{"one": 1, "two": 2, "three": 3}":\n'
'\n'
' >>> a = dict(one=1, two=2, three=3)\n'
" >>> b = {'one': 1, 'two': 2, 'three': 3}\n"
" >>> c = dict(zip(['one', 'two', 'three'], [1, 2, 3]))\n"
" >>> d = dict([('two', 2), ('one', 1), ('three', 3)])\n"
" >>> e = dict({'three': 3, 'one': 1, 'two': 2})\n"
' >>> a == b == c == d == e\n'
' True\n'
'\n'
' Providing keyword arguments as in the first example only '
'works for\n'
' keys that are valid Python identifiers. Otherwise, any '
'valid keys\n'
' can be used.\n'
'\n'
' These are the operations that dictionaries support (and '
'therefore,\n'
' custom mapping types should support too):\n'
'\n'
' len(d)\n'
'\n'
' Return the number of items in the dictionary *d*.\n'
'\n'
' d[key]\n'
'\n'
' Return the item of *d* with key *key*. Raises a '
'"KeyError" if\n'
' *key* is not in the map.\n'
'\n'
' If a subclass of dict defines a method "__missing__()" '
'and *key*\n'
' is not present, the "d[key]" operation calls that '
'method with\n'
' the key *key* as argument. The "d[key]" operation '
'then returns\n'
' or raises whatever is returned or raised by the\n'
' "__missing__(key)" call. No other operations or '
'methods invoke\n'
' "__missing__()". If "__missing__()" is not defined, '
'"KeyError"\n'
' is raised. "__missing__()" must be a method; it cannot '
'be an\n'
' instance variable:\n'
'\n'
' >>> class Counter(dict):\n'
' ... def __missing__(self, key):\n'
' ... return 0\n'
' >>> c = Counter()\n'
" >>> c['red']\n"
' 0\n'
" >>> c['red'] += 1\n"
" >>> c['red']\n"
' 1\n'
'\n'
' The example above shows part of the implementation of\n'
' "collections.Counter". A different "__missing__" '
'method is used\n'
' by "collections.defaultdict".\n'
'\n'
' d[key] = value\n'
'\n'
' Set "d[key]" to *value*.\n'
'\n'
' del d[key]\n'
'\n'
' Remove "d[key]" from *d*. Raises a "KeyError" if '
'*key* is not\n'
' in the map.\n'
'\n'
' key in d\n'
'\n'
' Return "True" if *d* has a key *key*, else "False".\n'
'\n'
' key not in d\n'
'\n'
' Equivalent to "not key in d".\n'
'\n'
' iter(d)\n'
'\n'
' Return an iterator over the keys of the dictionary. '
'This is a\n'
' shortcut for "iter(d.keys())".\n'
'\n'
' clear()\n'
'\n'
' Remove all items from the dictionary.\n'
'\n'
' copy()\n'
'\n'
' Return a shallow copy of the dictionary.\n'
'\n'
' classmethod fromkeys(seq[, value])\n'
'\n'
' Create a new dictionary with keys from *seq* and '
'values set to\n'
' *value*.\n'
'\n'
' "fromkeys()" is a class method that returns a new '
'dictionary.\n'
' *value* defaults to "None".\n'
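                '\n'
                '      For example:\n'
                '\n'
                "         >>> d = dict.fromkeys(['a', 'b'])\n"
                "         >>> d['a'] is None\n"
                '         True\n'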
'\n'
' get(key[, default])\n'
'\n'
' Return the value for *key* if *key* is in the '
'dictionary, else\n'
' *default*. If *default* is not given, it defaults to '
'"None", so\n'
' that this method never raises a "KeyError".\n'
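                '\n'
                '      For example:\n'
                '\n'
                "         >>> d = {'one': 1}\n"
                "         >>> d.get('one')\n"
                '         1\n'
                "         >>> d.get('two', 0)\n"
                '         0\n'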
'\n'
' items()\n'
'\n'
' Return a new view of the dictionary’s items ("(key, '
'value)"\n'
' pairs). See the documentation of view objects.\n'
'\n'
' keys()\n'
'\n'
' Return a new view of the dictionary’s keys. See the\n'
' documentation of view objects.\n'
'\n'
' pop(key[, default])\n'
'\n'
' If *key* is in the dictionary, remove it and return '
'its value,\n'
' else return *default*. If *default* is not given and '
'*key* is\n'
' not in the dictionary, a "KeyError" is raised.\n'
'\n'
' popitem()\n'
'\n'
' Remove and return an arbitrary "(key, value)" pair '
'from the\n'
' dictionary.\n'
'\n'
' "popitem()" is useful to destructively iterate over a\n'
' dictionary, as often used in set algorithms. If the '
'dictionary\n'
' is empty, calling "popitem()" raises a "KeyError".\n'
'\n'
' setdefault(key[, default])\n'
'\n'
' If *key* is in the dictionary, return its value. If '
'not, insert\n'
' *key* with a value of *default* and return *default*. '
'*default*\n'
' defaults to "None".\n'
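                '\n'
                '      For example:\n'
                '\n'
                '         >>> d = {}\n'
                "         >>> d.setdefault('key', []).append(1)\n"
                "         >>> d['key']\n"
                '         [1]\n'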
'\n'
' update([other])\n'
'\n'
' Update the dictionary with the key/value pairs from '
'*other*,\n'
' overwriting existing keys. Return "None".\n'
'\n'
' "update()" accepts either another dictionary object or '
'an\n'
' iterable of key/value pairs (as tuples or other '
'iterables of\n'
' length two). If keyword arguments are specified, the '
'dictionary\n'
' is then updated with those key/value pairs: '
'"d.update(red=1,\n'
' blue=2)".\n'
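                '\n'
                '      For example:\n'
                '\n'
                "         >>> d = {'red': 1}\n"
                '         >>> d.update(blue=2)\n'
                "         >>> d['blue']\n"
                '         2\n'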
'\n'
' values()\n'
'\n'
' Return a new view of the dictionary’s values. See '
'the\n'
' documentation of view objects.\n'
'\n'
' Dictionaries compare equal if and only if they have the '
'same "(key,\n'
' value)" pairs. Order comparisons (‘<’, ‘<=’, ‘>=’, ‘>’) '
'raise\n'
' "TypeError".\n'
'\n'
'See also: "types.MappingProxyType" can be used to create a '
'read-only\n'
' view of a "dict".\n'
'\n'
'\n'
'Dictionary view objects\n'
'=======================\n'
'\n'
'The objects returned by "dict.keys()", "dict.values()" and\n'
'"dict.items()" are *view objects*. They provide a dynamic '
'view on the\n'
'dictionary’s entries, which means that when the dictionary '
'changes,\n'
'the view reflects these changes.\n'
'\n'
'Dictionary views can be iterated over to yield their '
'respective data,\n'
'and support membership tests:\n'
'\n'
'len(dictview)\n'
'\n'
' Return the number of entries in the dictionary.\n'
'\n'
'iter(dictview)\n'
'\n'
' Return an iterator over the keys, values or items '
'(represented as\n'
' tuples of "(key, value)") in the dictionary.\n'
'\n'
' Keys and values are iterated over in an arbitrary order '
'which is\n'
' non-random, varies across Python implementations, and '
'depends on\n'
' the dictionary’s history of insertions and deletions. If '
'keys,\n'
' values and items views are iterated over with no '
'intervening\n'
' modifications to the dictionary, the order of items will '
'directly\n'
' correspond. This allows the creation of "(value, key)" '
'pairs using\n'
' "zip()": "pairs = zip(d.values(), d.keys())". Another '
'way to\n'
' create the same list is "pairs = [(v, k) for (k, v) in '
'd.items()]".\n'
'\n'
' Iterating views while adding or deleting entries in the '
'dictionary\n'
' may raise a "RuntimeError" or fail to iterate over all '
'entries.\n'
'\n'
'x in dictview\n'
'\n'
' Return "True" if *x* is in the underlying dictionary’s '
'keys, values\n'
' or items (in the latter case, *x* should be a "(key, '
'value)"\n'
' tuple).\n'
'\n'
'Keys views are set-like since their entries are unique and '
'hashable.\n'
'If all values are hashable, so that "(key, value)" pairs are '
'unique\n'
'and hashable, then the items view is also set-like. (Values '
'views are\n'
'not treated as set-like since the entries are generally not '
'unique.)\n'
'For set-like views, all of the operations defined for the '
'abstract\n'
'base class "collections.abc.Set" are available (for example, '
'"==",\n'
'"<", or "^").\n'
'\n'
'An example of dictionary view usage:\n'
'\n'
" >>> dishes = {'eggs': 2, 'sausage': 1, 'bacon': 1, "
"'spam': 500}\n"
' >>> keys = dishes.keys()\n'
' >>> values = dishes.values()\n'
'\n'
' >>> # iteration\n'
' >>> n = 0\n'
' >>> for val in values:\n'
' ... n += val\n'
' >>> print(n)\n'
' 504\n'
'\n'
' >>> # keys and values are iterated over in the same '
'order\n'
' >>> list(keys)\n'
" ['eggs', 'bacon', 'sausage', 'spam']\n"
' >>> list(values)\n'
' [2, 1, 1, 500]\n'
'\n'
' >>> # view objects are dynamic and reflect dict changes\n'
" >>> del dishes['eggs']\n"
" >>> del dishes['sausage']\n"
' >>> list(keys)\n'
" ['spam', 'bacon']\n"
'\n'
' >>> # set operations\n'
" >>> keys & {'eggs', 'bacon', 'salad'}\n"
" {'bacon'}\n"
" >>> keys ^ {'sausage', 'juice'}\n"
" {'juice', 'sausage', 'bacon', 'spam'}\n",
'typesmethods': 'Methods\n'
'*******\n'
'\n'
'Methods are functions that are called using the attribute '
'notation.\n'
'There are two flavors: built-in methods (such as "append()" '
'on lists)\n'
'and class instance methods. Built-in methods are described '
'with the\n'
'types that support them.\n'
'\n'
'If you access a method (a function defined in a class '
'namespace)\n'
'through an instance, you get a special object: a *bound '
'method* (also\n'
'called *instance method*) object. When called, it will add '
'the "self"\n'
'argument to the argument list. Bound methods have two '
'special read-\n'
'only attributes: "m.__self__" is the object on which the '
'method\n'
'operates, and "m.__func__" is the function implementing the '
'method.\n'
'Calling "m(arg-1, arg-2, ..., arg-n)" is completely '
'equivalent to\n'
'calling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".\n'
'\n'
'Like function objects, bound method objects support getting '
'arbitrary\n'
'attributes. However, since method attributes are actually '
'stored on\n'
'the underlying function object ("meth.__func__"), setting '
'method\n'
'attributes on bound methods is disallowed. Attempting to '
'set an\n'
'attribute on a method results in an "AttributeError" being '
'raised. In\n'
'order to set a method attribute, you need to explicitly set '
'it on the\n'
'underlying function object:\n'
'\n'
' >>> class C:\n'
' ... def method(self):\n'
' ... pass\n'
' ...\n'
' >>> c = C()\n'
" >>> c.method.whoami = 'my name is method' # can't set on "
'the method\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 1, in <module>\n'
" AttributeError: 'method' object has no attribute "
"'whoami'\n"
" >>> c.method.__func__.whoami = 'my name is method'\n"
' >>> c.method.whoami\n'
" 'my name is method'\n"
'\n'
'See The standard type hierarchy for more information.\n',
'typesmodules': 'Modules\n'
'*******\n'
'\n'
'The only special operation on a module is attribute access: '
'"m.name",\n'
'where *m* is a module and *name* accesses a name defined in '
'*m*’s\n'
'symbol table. Module attributes can be assigned to. (Note '
'that the\n'
'"import" statement is not, strictly speaking, an operation '
'on a module\n'
'object; "import foo" does not require a module object named '
'*foo* to\n'
'exist, rather it requires an (external) *definition* for a '
'module\n'
'named *foo* somewhere.)\n'
'\n'
'A special attribute of every module is "__dict__". This is '
'the\n'
'dictionary containing the module’s symbol table. Modifying '
'this\n'
'dictionary will actually change the module’s symbol table, '
'but direct\n'
'assignment to the "__dict__" attribute is not possible (you '
'can write\n'
'"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but '
'you can’t\n'
'write "m.__dict__ = {}"). Modifying "__dict__" directly is '
'not\n'
'recommended.\n'
'\n'
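                'For example:\n'
                '\n'
                '   >>> import types\n'
                "   >>> m = types.ModuleType('m')\n"
                "   >>> m.__dict__['a'] = 1\n"
                '   >>> m.a\n'
                '   1\n'
                '\n'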
'Modules built into the interpreter are written like this: '
'"<module\n'
'\'sys\' (built-in)>". If loaded from a file, they are '
'written as\n'
'"<module \'os\' from '
'\'/usr/local/lib/pythonX.Y/os.pyc\'>".\n',
'typesseq': 'Sequence Types — "list", "tuple", "range"\n'
'*****************************************\n'
'\n'
'There are three basic sequence types: lists, tuples, and range\n'
'objects. Additional sequence types tailored for processing of '
'binary\n'
'data and text strings are described in dedicated sections.\n'
'\n'
'\n'
'Common Sequence Operations\n'
'==========================\n'
'\n'
'The operations in the following table are supported by most '
'sequence\n'
'types, both mutable and immutable. The '
'"collections.abc.Sequence" ABC\n'
'is provided to make it easier to correctly implement these '
'operations\n'
'on custom sequence types.\n'
'\n'
'This table lists the sequence operations sorted in ascending '
'priority.\n'
'In the table, *s* and *t* are sequences of the same type, *n*, '
'*i*,\n'
'*j* and *k* are integers and *x* is an arbitrary object that '
'meets any\n'
'type and value restrictions imposed by *s*.\n'
'\n'
'The "in" and "not in" operations have the same priorities as '
'the\n'
'comparison operations. The "+" (concatenation) and "*" '
'(repetition)\n'
'operations have the same priority as the corresponding numeric\n'
'operations. [3]\n'
'\n'
'+----------------------------+----------------------------------+------------+\n'
'| Operation | Result '
'| Notes |\n'
'+============================+==================================+============+\n'
'| "x in s" | "True" if an item of *s* is '
'| (1) |\n'
'| | equal to *x*, else "False" '
'| |\n'
'+----------------------------+----------------------------------+------------+\n'
'| "x not in s" | "False" if an item of *s* is '
'| (1) |\n'
'| | equal to *x*, else "True" '
'| |\n'
'+----------------------------+----------------------------------+------------+\n'
'| "s + t" | the concatenation of *s* and *t* '
'| (6)(7) |\n'
'+----------------------------+----------------------------------+------------+\n'
'| "s * n" or "n * s" | equivalent to adding *s* to '
'| (2)(7) |\n'
'| | itself *n* times '
'| |\n'
'+----------------------------+----------------------------------+------------+\n'
'| "s[i]" | *i*th item of *s*, origin 0 '
'| (3) |\n'
'+----------------------------+----------------------------------+------------+\n'
'| "s[i:j]" | slice of *s* from *i* to *j* '
'| (3)(4) |\n'
'+----------------------------+----------------------------------+------------+\n'
'| "s[i:j:k]" | slice of *s* from *i* to *j* '
'| (3)(5) |\n'
'| | with step *k* '
'| |\n'
'+----------------------------+----------------------------------+------------+\n'
'| "len(s)" | length of *s* '
'| |\n'
'+----------------------------+----------------------------------+------------+\n'
'| "min(s)" | smallest item of *s* '
'| |\n'
'+----------------------------+----------------------------------+------------+\n'
'| "max(s)" | largest item of *s* '
'| |\n'
'+----------------------------+----------------------------------+------------+\n'
'| "s.index(x[, i[, j]])" | index of the first occurrence of '
'| (8) |\n'
'| | *x* in *s* (at or after index '
'| |\n'
'| | *i* and before index *j*) '
'| |\n'
'+----------------------------+----------------------------------+------------+\n'
'| "s.count(x)" | total number of occurrences of '
'| |\n'
'| | *x* in *s* '
'| |\n'
'+----------------------------+----------------------------------+------------+\n'
'\n'
'Sequences of the same type also support comparisons. In '
'particular,\n'
'tuples and lists are compared lexicographically by comparing\n'
'corresponding elements. This means that to compare equal, every\n'
'element must compare equal and the two sequences must be of the '
'same\n'
'type and have the same length. (For full details see '
'Comparisons in\n'
'the language reference.)\n'
'\n'
'Notes:\n'
'\n'
'1. While the "in" and "not in" operations are used only for '
'simple\n'
' containment testing in the general case, some specialised '
'sequences\n'
' (such as "str", "bytes" and "bytearray") also use them for\n'
' subsequence testing:\n'
'\n'
' >>> "gg" in "eggs"\n'
' True\n'
'\n'
'2. Values of *n* less than "0" are treated as "0" (which yields '
'an\n'
' empty sequence of the same type as *s*). Note that items in '
'the\n'
' sequence *s* are not copied; they are referenced multiple '
'times.\n'
' This often haunts new Python programmers; consider:\n'
'\n'
' >>> lists = [[]] * 3\n'
' >>> lists\n'
' [[], [], []]\n'
' >>> lists[0].append(3)\n'
' >>> lists\n'
' [[3], [3], [3]]\n'
'\n'
' What has happened is that "[[]]" is a one-element list '
'containing\n'
' an empty list, so all three elements of "[[]] * 3" are '
'references\n'
' to this single empty list. Modifying any of the elements of\n'
' "lists" modifies this single list. You can create a list of\n'
' different lists this way:\n'
'\n'
' >>> lists = [[] for i in range(3)]\n'
' >>> lists[0].append(3)\n'
' >>> lists[1].append(5)\n'
' >>> lists[2].append(7)\n'
' >>> lists\n'
' [[3], [5], [7]]\n'
'\n'
' Further explanation is available in the FAQ entry How do I '
'create a\n'
' multidimensional list?.\n'
'\n'
'3. If *i* or *j* is negative, the index is relative to the end '
'of\n'
' sequence *s*: "len(s) + i" or "len(s) + j" is substituted. '
'But\n'
' note that "-0" is still "0".\n'
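            '\n'
            '   For example:\n'
            '\n'
            "      >>> s = 'Python'\n"
            '      >>> s[-1]\n'
            "      'n'\n"
            '      >>> s[-2]\n'
            "      'o'\n"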
'\n'
'4. The slice of *s* from *i* to *j* is defined as the sequence '
'of\n'
' items with index *k* such that "i <= k < j". If *i* or *j* '
'is\n'
' greater than "len(s)", use "len(s)". If *i* is omitted or '
'"None",\n'
' use "0". If *j* is omitted or "None", use "len(s)". If *i* '
'is\n'
' greater than or equal to *j*, the slice is empty.\n'
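            '\n'
            '   For example:\n'
            '\n'
            "      >>> s = 'abcde'\n"
            '      >>> s[1:3]\n'
            "      'bc'\n"
            '      >>> s[3:100]\n'
            "      'de'\n"
            '      >>> s[4:2]\n'
            "      ''\n"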
'\n'
'5. The slice of *s* from *i* to *j* with step *k* is defined as '
'the\n'
' sequence of items with index "x = i + n*k" such that "0 <= n '
'<\n'
' (j-i)/k". In other words, the indices are "i", "i+k", '
'"i+2*k",\n'
' "i+3*k" and so on, stopping when *j* is reached (but never\n'
' including *j*). When *k* is positive, *i* and *j* are '
'reduced to\n'
' "len(s)" if they are greater. When *k* is negative, *i* and '
'*j* are\n'
' reduced to "len(s) - 1" if they are greater. If *i* or *j* '
'are\n'
' omitted or "None", they become “end” values (which end '
'depends on\n'
' the sign of *k*). Note, *k* cannot be zero. If *k* is '
'"None", it\n'
' is treated like "1".\n'
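            '\n'
            '   For example:\n'
            '\n'
            '      >>> list(range(10))[1:8:2]\n'
            '      [1, 3, 5, 7]\n'
            "      >>> 'abcdefg'[::-1]\n"
            "      'gfedcba'\n"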
'\n'
'6. Concatenating immutable sequences always results in a new\n'
' object. This means that building up a sequence by repeated\n'
' concatenation will have a quadratic runtime cost in the '
'total\n'
' sequence length. To get a linear runtime cost, you must '
'switch to\n'
' one of the alternatives below:\n'
'\n'
' * if concatenating "str" objects, you can build a list and '
'use\n'
' "str.join()" at the end or else write to an "io.StringIO"\n'
' instance and retrieve its value when complete\n'
'\n'
' * if concatenating "bytes" objects, you can similarly use\n'
' "bytes.join()" or "io.BytesIO", or you can do in-place\n'
' concatenation with a "bytearray" object. "bytearray" '
'objects are\n'
' mutable and have an efficient overallocation mechanism\n'
'\n'
' * if concatenating "tuple" objects, extend a "list" instead\n'
'\n'
' * for other types, investigate the relevant class '
'documentation\n'
'\n'
'7. Some sequence types (such as "range") only support item\n'
' sequences that follow specific patterns, and hence don’t '
'support\n'
' sequence concatenation or repetition.\n'
'\n'
'8. "index" raises "ValueError" when *x* is not found in *s*. '
'Not\n'
' all implementations support passing the additional arguments '
'*i*\n'
' and *j*. These arguments allow efficient searching of '
'subsections\n'
' of the sequence. Passing the extra arguments is roughly '
'equivalent\n'
' to using "s[i:j].index(x)", only without copying any data and '
'with\n'
' the returned index being relative to the start of the '
'sequence\n'
' rather than the start of the slice.\n'
'\n'
'\n'
'Immutable Sequence Types\n'
'========================\n'
'\n'
'The only operation that immutable sequence types generally '
'implement\n'
'that is not also implemented by mutable sequence types is '
'support for\n'
'the "hash()" built-in.\n'
'\n'
'This support allows immutable sequences, such as "tuple" '
'instances, to\n'
'be used as "dict" keys and stored in "set" and "frozenset" '
'instances.\n'
'\n'
'Attempting to hash an immutable sequence that contains '
'unhashable\n'
'values will result in "TypeError".\n'
'\n'
'\n'
'Mutable Sequence Types\n'
'======================\n'
'\n'
'The operations in the following table are defined on mutable '
'sequence\n'
'types. The "collections.abc.MutableSequence" ABC is provided to '
'make\n'
'it easier to correctly implement these operations on custom '
'sequence\n'
'types.\n'
'\n'
'In the table *s* is an instance of a mutable sequence type, *t* '
'is any\n'
'iterable object and *x* is an arbitrary object that meets any '
'type and\n'
'value restrictions imposed by *s* (for example, "bytearray" '
'only\n'
'accepts integers that meet the value restriction "0 <= x <= '
'255").\n'
'\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| Operation | '
'Result | Notes |\n'
'+================================+==================================+=======================+\n'
'| "s[i] = x" | item *i* of *s* is replaced '
'by | |\n'
'| | '
'*x* | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s[i:j] = t" | slice of *s* from *i* to *j* '
'is | |\n'
'| | replaced by the contents of '
'the | |\n'
'| | iterable '
'*t* | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "del s[i:j]" | same as "s[i:j] = '
'[]" | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s[i:j:k] = t" | the elements of "s[i:j:k]" '
'are | (1) |\n'
'| | replaced by those of '
'*t* | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "del s[i:j:k]" | removes the elements '
'of | |\n'
'| | "s[i:j:k]" from the '
'list | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.append(x)" | appends *x* to the end of '
'the | |\n'
'| | sequence (same '
'as | |\n'
'| | "s[len(s):len(s)] = '
'[x]") | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.clear()" | removes all items from "s" '
'(same | (5) |\n'
'| | as "del '
's[:]") | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.copy()" | creates a shallow copy of '
'"s" | (5) |\n'
'| | (same as '
'"s[:]") | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.extend(t)" or "s += t" | extends *s* with the contents '
'of | |\n'
'| | *t* (for the most part the '
'same | |\n'
'| | as "s[len(s):len(s)] = '
't") | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s *= n" | updates *s* with its '
'contents | (6) |\n'
'| | repeated *n* '
'times | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.insert(i, x)" | inserts *x* into *s* at '
'the | |\n'
'| | index given by *i* (same '
'as | |\n'
'| | "s[i:i] = '
'[x]") | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.pop([i])" | retrieves the item at *i* '
'and | (2) |\n'
'| | also removes it from '
'*s* | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.remove(x)" | remove the first item from '
'*s* | (3) |\n'
'| | where "s[i] == '
'x" | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.reverse()" | reverses the items of *s* '
'in | (4) |\n'
'| | '
'place | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'\n'
'Notes:\n'
'\n'
'1. *t* must have the same length as the slice it is replacing.\n'
'\n'
'2. The optional argument *i* defaults to "-1", so that by '
'default\n'
' the last item is removed and returned.\n'
'\n'
'3. "remove" raises "ValueError" when *x* is not found in *s*.\n'
'\n'
'4. The "reverse()" method modifies the sequence in place for\n'
' economy of space when reversing a large sequence. To remind '
'users\n'
' that it operates by side effect, it does not return the '
'reversed\n'
' sequence.\n'
'\n'
'5. "clear()" and "copy()" are included for consistency with the\n'
' interfaces of mutable containers that don’t support slicing\n'
' operations (such as "dict" and "set")\n'
'\n'
' New in version 3.3: "clear()" and "copy()" methods.\n'
'\n'
'6. The value *n* is an integer, or an object implementing\n'
' "__index__()". Zero and negative values of *n* clear the '
'sequence.\n'
' Items in the sequence are not copied; they are referenced '
'multiple\n'
' times, as explained for "s * n" under Common Sequence '
'Operations.\n'
'\n'
'\n'
'Lists\n'
'=====\n'
'\n'
'Lists are mutable sequences, typically used to store collections '
'of\n'
'homogeneous items (where the precise degree of similarity will '
'vary by\n'
'application).\n'
'\n'
'class list([iterable])\n'
'\n'
' Lists may be constructed in several ways:\n'
'\n'
' * Using a pair of square brackets to denote the empty list: '
'"[]"\n'
'\n'
' * Using square brackets, separating items with commas: '
'"[a]",\n'
' "[a, b, c]"\n'
'\n'
' * Using a list comprehension: "[x for x in iterable]"\n'
'\n'
' * Using the type constructor: "list()" or "list(iterable)"\n'
'\n'
' The constructor builds a list whose items are the same and in '
'the\n'
' same order as *iterable*’s items. *iterable* may be either '
'a\n'
' sequence, a container that supports iteration, or an '
'iterator\n'
' object. If *iterable* is already a list, a copy is made and\n'
' returned, similar to "iterable[:]". For example, '
'"list(\'abc\')"\n'
' returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" '
'returns "[1, 2,\n'
' 3]". If no argument is given, the constructor creates a new '
'empty\n'
' list, "[]".\n'
'\n'
' Many other operations also produce lists, including the '
'"sorted()"\n'
' built-in.\n'
'\n'
' Lists implement all of the common and mutable sequence '
'operations.\n'
' Lists also provide the following additional method:\n'
'\n'
' sort(*, key=None, reverse=False)\n'
'\n'
' This method sorts the list in place, using only "<" '
'comparisons\n'
' between items. Exceptions are not suppressed - if any '
'comparison\n'
' operations fail, the entire sort operation will fail (and '
'the\n'
' list will likely be left in a partially modified state).\n'
'\n'
' "sort()" accepts two arguments that can only be passed by\n'
' keyword (keyword-only arguments):\n'
'\n'
' *key* specifies a function of one argument that is used '
'to\n'
' extract a comparison key from each list element (for '
'example,\n'
' "key=str.lower"). The key corresponding to each item in '
'the list\n'
' is calculated once and then used for the entire sorting '
'process.\n'
' The default value of "None" means that list items are '
'sorted\n'
' directly without calculating a separate key value.\n'
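            '\n'
            '      For example:\n'
            '\n'
            "         >>> s = ['banana', 'Apple', 'cherry']\n"
            '         >>> s.sort(key=str.lower)\n'
            '         >>> s\n'
            "         ['Apple', 'banana', 'cherry']\n"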
'\n'
' The "functools.cmp_to_key()" utility is available to '
'convert a\n'
' 2.x style *cmp* function to a *key* function.\n'
'\n'
' *reverse* is a boolean value. If set to "True", then the '
'list\n'
' elements are sorted as if each comparison were reversed.\n'
'\n'
' This method modifies the sequence in place for economy of '
'space\n'
' when sorting a large sequence. To remind users that it '
'operates\n'
' by side effect, it does not return the sorted sequence '
'(use\n'
' "sorted()" to explicitly request a new sorted list '
'instance).\n'
'\n'
' The "sort()" method is guaranteed to be stable. A sort '
'is\n'
' stable if it guarantees not to change the relative order '
'of\n'
' elements that compare equal — this is helpful for sorting '
'in\n'
' multiple passes (for example, sort by department, then by '
'salary\n'
' grade).\n'
'\n'
' **CPython implementation detail:** While a list is being '
'sorted,\n'
' the effect of attempting to mutate, or even inspect, the '
'list is\n'
' undefined. The C implementation of Python makes the list '
'appear\n'
' empty for the duration, and raises "ValueError" if it can '
'detect\n'
' that the list has been mutated during a sort.\n'
'\n'
'\n'
'Tuples\n'
'======\n'
'\n'
'Tuples are immutable sequences, typically used to store '
'collections of\n'
'heterogeneous data (such as the 2-tuples produced by the '
'"enumerate()"\n'
'built-in). Tuples are also used for cases where an immutable '
'sequence\n'
'of homogeneous data is needed (such as allowing storage in a '
'"set" or\n'
'"dict" instance).\n'
'\n'
'class tuple([iterable])\n'
'\n'
' Tuples may be constructed in a number of ways:\n'
'\n'
' * Using a pair of parentheses to denote the empty tuple: '
'"()"\n'
'\n'
' * Using a trailing comma for a singleton tuple: "a," or '
'"(a,)"\n'
'\n'
' * Separating items with commas: "a, b, c" or "(a, b, c)"\n'
'\n'
' * Using the "tuple()" built-in: "tuple()" or '
'"tuple(iterable)"\n'
'\n'
' The constructor builds a tuple whose items are the same and '
'in the\n'
' same order as *iterable*’s items. *iterable* may be either '
'a\n'
' sequence, a container that supports iteration, or an '
'iterator\n'
' object. If *iterable* is already a tuple, it is returned\n'
' unchanged. For example, "tuple(\'abc\')" returns "(\'a\', '
'\'b\', \'c\')"\n'
' and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument '
'is\n'
' given, the constructor creates a new empty tuple, "()".\n'
'\n'
' Note that it is actually the comma which makes a tuple, not '
'the\n'
' parentheses. The parentheses are optional, except in the '
'empty\n'
' tuple case, or when they are needed to avoid syntactic '
'ambiguity.\n'
' For example, "f(a, b, c)" is a function call with three '
'arguments,\n'
' while "f((a, b, c))" is a function call with a 3-tuple as the '
'sole\n'
' argument.\n'
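            '\n'
            '   For example:\n'
            '\n'
            '      >>> t = 1,\n'
            '      >>> t\n'
            '      (1,)\n'
            '      >>> type((1))\n'
            "      <class 'int'>\n"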
'\n'
' Tuples implement all of the common sequence operations.\n'
'\n'
'For heterogeneous collections of data where access by name is '
'clearer\n'
'than access by index, "collections.namedtuple()" may be a more\n'
'appropriate choice than a simple tuple object.\n'
'\n'
'\n'
'Ranges\n'
'======\n'
'\n'
'The "range" type represents an immutable sequence of numbers and '
'is\n'
'commonly used for looping a specific number of times in "for" '
'loops.\n'
'\n'
'class range(stop)\n'
'class range(start, stop[, step])\n'
'\n'
' The arguments to the range constructor must be integers '
'(either\n'
' built-in "int" or any object that implements the "__index__"\n'
' special method). If the *step* argument is omitted, it '
'defaults to\n'
' "1". If the *start* argument is omitted, it defaults to "0". '
'If\n'
' *step* is zero, "ValueError" is raised.\n'
'\n'
' For a positive *step*, the contents of a range "r" are '
'determined\n'
' by the formula "r[i] = start + step*i" where "i >= 0" and '
'"r[i] <\n'
' stop".\n'
'\n'
' For a negative *step*, the contents of the range are still\n'
' determined by the formula "r[i] = start + step*i", but the\n'
' constraints are "i >= 0" and "r[i] > stop".\n'
'\n'
' A range object will be empty if "r[0]" does not meet the '
'value\n'
' constraint. Ranges do support negative indices, but these '
'are\n'
' interpreted as indexing from the end of the sequence '
'determined by\n'
' the positive indices.\n'
'\n'
' Ranges containing absolute values larger than "sys.maxsize" '
'are\n'
' permitted but some features (such as "len()") may raise\n'
' "OverflowError".\n'
'\n'
' Range examples:\n'
'\n'
' >>> list(range(10))\n'
' [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n'
' >>> list(range(1, 11))\n'
' [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n'
' >>> list(range(0, 30, 5))\n'
' [0, 5, 10, 15, 20, 25]\n'
' >>> list(range(0, 10, 3))\n'
' [0, 3, 6, 9]\n'
' >>> list(range(0, -10, -1))\n'
' [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n'
' >>> list(range(0))\n'
' []\n'
' >>> list(range(1, 0))\n'
' []\n'
'\n'
' Ranges implement all of the common sequence operations '
'except\n'
' concatenation and repetition (due to the fact that range '
'objects\n'
' can only represent sequences that follow a strict pattern '
'and\n'
' repetition and concatenation will usually violate that '
'pattern).\n'
'\n'
' start\n'
'\n'
' The value of the *start* parameter (or "0" if the '
'parameter was\n'
' not supplied)\n'
'\n'
' stop\n'
'\n'
' The value of the *stop* parameter\n'
'\n'
' step\n'
'\n'
' The value of the *step* parameter (or "1" if the parameter '
'was\n'
' not supplied)\n'
'\n'
'The advantage of the "range" type over a regular "list" or '
'"tuple" is\n'
'that a "range" object will always take the same (small) amount '
'of\n'
'memory, no matter the size of the range it represents (as it '
'only\n'
'stores the "start", "stop" and "step" values, calculating '
'individual\n'
'items and subranges as needed).\n'
'\n'
'Range objects implement the "collections.abc.Sequence" ABC, and\n'
'provide features such as containment tests, element index '
'lookup,\n'
'slicing and support for negative indices (see Sequence Types — '
'list,\n'
'tuple, range):\n'
'\n'
'>>> r = range(0, 20, 2)\n'
'>>> r\n'
'range(0, 20, 2)\n'
'>>> 11 in r\n'
'False\n'
'>>> 10 in r\n'
'True\n'
'>>> r.index(10)\n'
'5\n'
'>>> r[5]\n'
'10\n'
'>>> r[:5]\n'
'range(0, 10, 2)\n'
'>>> r[-1]\n'
'18\n'
'\n'
'Testing range objects for equality with "==" and "!=" compares '
'them as\n'
'sequences. That is, two range objects are considered equal if '
'they\n'
'represent the same sequence of values. (Note that two range '
'objects\n'
'that compare equal might have different "start", "stop" and '
'"step"\n'
'attributes, for example "range(0) == range(2, 1, 3)" or '
'"range(0, 3,\n'
'2) == range(0, 4, 2)".)\n'
'\n'
'Changed in version 3.2: Implement the Sequence ABC. Support '
'slicing\n'
'and negative indices. Test "int" objects for membership in '
'constant\n'
'time instead of iterating through all items.\n'
'\n'
'Changed in version 3.3: Define ‘==’ and ‘!=’ to compare range '
'objects\n'
'based on the sequence of values they define (instead of '
'comparing\n'
'based on object identity).\n'
'\n'
'New in version 3.3: The "start", "stop" and "step" attributes.\n'
'\n'
'See also:\n'
'\n'
' * The linspace recipe shows how to implement a lazy version '
'of\n'
            '     range that is suitable for floating point applications.\n',
'typesseq-mutable': 'Mutable Sequence Types\n'
'**********************\n'
'\n'
'The operations in the following table are defined on '
'mutable sequence\n'
'types. The "collections.abc.MutableSequence" ABC is '
'provided to make\n'
'it easier to correctly implement these operations on '
'custom sequence\n'
'types.\n'
'\n'
'In the table *s* is an instance of a mutable sequence '
'type, *t* is any\n'
'iterable object and *x* is an arbitrary object that '
'meets any type and\n'
'value restrictions imposed by *s* (for example, '
'"bytearray" only\n'
'accepts integers that meet the value restriction "0 <= x '
'<= 255").\n'
'\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| Operation | '
'Result | Notes '
'|\n'
'+================================+==================================+=======================+\n'
'| "s[i] = x" | item *i* of *s* is '
'replaced by | |\n'
'| | '
'*x* | '
'|\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s[i:j] = t" | slice of *s* from *i* '
'to *j* is | |\n'
'| | replaced by the '
'contents of the | |\n'
'| | iterable '
'*t* | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "del s[i:j]" | same as "s[i:j] = '
'[]" | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s[i:j:k] = t" | the elements of '
'"s[i:j:k]" are | (1) |\n'
'| | replaced by those of '
'*t* | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "del s[i:j:k]" | removes the elements '
'of | |\n'
'| | "s[i:j:k]" from the '
'list | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.append(x)" | appends *x* to the '
'end of the | |\n'
'| | sequence (same '
'as | |\n'
'| | "s[len(s):len(s)] = '
'[x]") | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.clear()" | removes all items '
'from "s" (same | (5) |\n'
'| | as "del '
's[:]") | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.copy()" | creates a shallow '
'copy of "s" | (5) |\n'
'| | (same as '
'"s[:]") | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.extend(t)" or "s += t" | extends *s* with the '
'contents of | |\n'
'| | *t* (for the most '
'part the same | |\n'
'| | as "s[len(s):len(s)] '
'= t") | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s *= n" | updates *s* with its '
'contents | (6) |\n'
'| | repeated *n* '
'times | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.insert(i, x)" | inserts *x* into *s* '
'at the | |\n'
'| | index given by *i* '
'(same as | |\n'
'| | "s[i:i] = '
'[x]") | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.pop([i])" | retrieves the item at '
'*i* and | (2) |\n'
'| | also removes it from '
'*s* | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.remove(x)" | remove the first item '
'from *s* | (3) |\n'
'| | where "s[i] == '
'x" | |\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'| "s.reverse()" | reverses the items of '
'*s* in | (4) |\n'
'| | '
'place | '
'|\n'
'+--------------------------------+----------------------------------+-----------------------+\n'
'\n'
'Notes:\n'
'\n'
'1. *t* must have the same length as the slice it is '
'replacing.\n'
'\n'
'2. The optional argument *i* defaults to "-1", so that '
'by default\n'
' the last item is removed and returned.\n'
'\n'
'3. "remove" raises "ValueError" when *x* is not found in '
'*s*.\n'
'\n'
'4. The "reverse()" method modifies the sequence in place '
'for\n'
' economy of space when reversing a large sequence. To '
'remind users\n'
' that it operates by side effect, it does not return '
'the reversed\n'
' sequence.\n'
'\n'
'5. "clear()" and "copy()" are included for consistency '
'with the\n'
' interfaces of mutable containers that don’t support '
'slicing\n'
' operations (such as "dict" and "set")\n'
'\n'
' New in version 3.3: "clear()" and "copy()" methods.\n'
'\n'
'6. The value *n* is an integer, or an object '
'implementing\n'
' "__index__()". Zero and negative values of *n* clear '
'the sequence.\n'
' Items in the sequence are not copied; they are '
'referenced multiple\n'
' times, as explained for "s * n" under Common Sequence '
'Operations.\n',
'unary': 'Unary arithmetic and bitwise operations\n'
'***************************************\n'
'\n'
'All unary arithmetic and bitwise operations have the same '
'priority:\n'
'\n'
' u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n'
'\n'
'The unary "-" (minus) operator yields the negation of its numeric\n'
'argument.\n'
'\n'
'The unary "+" (plus) operator yields its numeric argument '
'unchanged.\n'
'\n'
'The unary "~" (invert) operator yields the bitwise inversion of '
'its\n'
'integer argument. The bitwise inversion of "x" is defined as\n'
'"-(x+1)". It only applies to integral numbers.\n'
'\n'
'In all three cases, if the argument does not have the proper type, '
'a\n'
'"TypeError" exception is raised.\n',
'while': 'The "while" statement\n'
'*********************\n'
'\n'
'The "while" statement is used for repeated execution as long as an\n'
'expression is true:\n'
'\n'
' while_stmt ::= "while" expression ":" suite\n'
' ["else" ":" suite]\n'
'\n'
'This repeatedly tests the expression and, if it is true, executes '
'the\n'
'first suite; if the expression is false (which may be the first '
'time\n'
'it is tested) the suite of the "else" clause, if present, is '
'executed\n'
'and the loop terminates.\n'
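         '\n'
         'For example:\n'
         '\n'
         '   >>> n = 0\n'
         '   >>> while n < 3:\n'
         '   ...     n += 1\n'
         '   ... else:\n'
         "   ...     print('loop ended without break')\n"
         '   loop ended without break\n'
         '   >>> n\n'
         '   3\n'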
'\n'
'A "break" statement executed in the first suite terminates the '
'loop\n'
'without executing the "else" clause’s suite. A "continue" '
'statement\n'
'executed in the first suite skips the rest of the suite and goes '
'back\n'
'to testing the expression.\n',
'with': 'The "with" statement\n'
'********************\n'
'\n'
'The "with" statement is used to wrap the execution of a block with\n'
'methods defined by a context manager (see section With Statement\n'
'Context Managers). This allows common "try"…"except"…"finally" '
'usage\n'
'patterns to be encapsulated for convenient reuse.\n'
'\n'
' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n'
' with_item ::= expression ["as" target]\n'
'\n'
'The execution of the "with" statement with one “item” proceeds as\n'
'follows:\n'
'\n'
'1. The context expression (the expression given in the "with_item")\n'
' is evaluated to obtain a context manager.\n'
'\n'
'2. The context manager’s "__exit__()" is loaded for later use.\n'
'\n'
'3. The context manager’s "__enter__()" method is invoked.\n'
'\n'
'4. If a target was included in the "with" statement, the return\n'
' value from "__enter__()" is assigned to it.\n'
'\n'
' Note: The "with" statement guarantees that if the "__enter__()"\n'
' method returns without an error, then "__exit__()" will always '
'be\n'
' called. Thus, if an error occurs during the assignment to the\n'
' target list, it will be treated the same as an error occurring\n'
' within the suite would be. See step 6 below.\n'
'\n'
'5. The suite is executed.\n'
'\n'
'6. The context manager’s "__exit__()" method is invoked. If an\n'
' exception caused the suite to be exited, its type, value, and\n'
' traceback are passed as arguments to "__exit__()". Otherwise, '
'three\n'
' "None" arguments are supplied.\n'
'\n'
' If the suite was exited due to an exception, and the return '
'value\n'
' from the "__exit__()" method was false, the exception is '
'reraised.\n'
' If the return value was true, the exception is suppressed, and\n'
' execution continues with the statement following the "with"\n'
' statement.\n'
'\n'
' If the suite was exited for any reason other than an exception, '
'the\n'
' return value from "__exit__()" is ignored, and execution '
'proceeds\n'
' at the normal location for the kind of exit that was taken.\n'
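        '\n'
        'For example, a common use ensures that a file is closed when the\n'
        'block is exited, even if an exception occurs:\n'
        '\n'
        "   with open('log.txt') as f:\n"
        '       data = f.read()\n'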
'\n'
'With more than one item, the context managers are processed as if\n'
'multiple "with" statements were nested:\n'
'\n'
' with A() as a, B() as b:\n'
' suite\n'
'\n'
'is equivalent to\n'
'\n'
' with A() as a:\n'
' with B() as b:\n'
' suite\n'
'\n'
'Changed in version 3.1: Support for multiple context expressions.\n'
'\n'
'See also:\n'
'\n'
' **PEP 343** - The “with” statement\n'
' The specification, background, and examples for the Python '
'"with"\n'
' statement.\n',
'yield': 'The "yield" statement\n'
'*********************\n'
'\n'
' yield_stmt ::= yield_expression\n'
'\n'
'A "yield" statement is semantically equivalent to a yield '
'expression.\n'
'The yield statement can be used to omit the parentheses that would\n'
'otherwise be required in the equivalent yield expression '
'statement.\n'
'For example, the yield statements\n'
'\n'
' yield <expr>\n'
' yield from <expr>\n'
'\n'
'are equivalent to the yield expression statements\n'
'\n'
' (yield <expr>)\n'
' (yield from <expr>)\n'
'\n'
'Yield expressions and statements are only used when defining a\n'
'*generator* function, and are only used in the body of the '
'generator\n'
'function. Using yield in a function definition is sufficient to '
'cause\n'
'that definition to create a generator function instead of a normal\n'
'function.\n'
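         '\n'
         'For example:\n'
         '\n'
         '   >>> def gen():\n'
         '   ...     yield 1\n'
         '   ...     yield 2\n'
         '   >>> list(gen())\n'
         '   [1, 2]\n'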
'\n'
'For full details of "yield" semantics, refer to the Yield '
'expressions\n'
'section.\n'}
| 50.180851 | 119 | 0.429521 |
4a1fd5da9f04dda336f8393060407a6bfe2b31c3 | 14136 | py | Python | cirq-core/cirq/sim/sparse_simulator.py | Zshan0/Cirq | 93bbaa853305faa65117bcbdc2063f741cb2977c | ["Apache-2.0"] | 1 | 2020-06-11T19:31:25.000Z | 2020-06-11T19:31:25.000Z | cirq-core/cirq/sim/sparse_simulator.py | Zshan0/Cirq | 93bbaa853305faa65117bcbdc2063f741cb2977c | ["Apache-2.0"] | null | null | null | cirq-core/cirq/sim/sparse_simulator.py | Zshan0/Cirq | 93bbaa853305faa65117bcbdc2063f741cb2977c | ["Apache-2.0"] | null | null | null |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simulator that uses numpy's einsum for sparse matrix operations."""
from typing import (
Any,
Dict,
Iterator,
List,
Type,
TYPE_CHECKING,
Union,
Sequence,
Optional,
)
import numpy as np
from cirq import ops, protocols, qis
from cirq.sim import (
simulator,
state_vector,
state_vector_simulator,
act_on_state_vector_args,
)
if TYPE_CHECKING:
import cirq
from numpy.typing import DTypeLike
class Simulator(
state_vector_simulator.SimulatesIntermediateStateVector['SparseSimulatorStep'],
simulator.SimulatesExpectationValues,
):
"""A sparse matrix state vector simulator that uses numpy.
This simulator can be applied on circuits that are made up of operations
that have a `_unitary_` method, or `_has_unitary_` and
`_apply_unitary_`, `_mixture_` methods, are measurements, or support a
`_decompose_` method that returns operations satisfying these same
conditions. That is to say, the operations should follow the
`cirq.SupportsConsistentApplyUnitary` protocol, the `cirq.SupportsUnitary`
protocol, the `cirq.SupportsMixture` protocol, or the
`cirq.CompositeOperation` protocol. It is also permitted for the circuit
to contain measurements which are operations that support
`cirq.SupportsKraus` and `cirq.SupportsMeasurementKey`
This simulator supports four types of simulation.
Run simulations which mimic running on actual quantum hardware. These
simulations do not give access to the state vector (like actual hardware).
There are two variations of run methods, one which takes in a single
(optional) way to resolve parameterized circuits, and a second which
takes in a list or sweep of parameter resolver:
run(circuit, param_resolver, repetitions)
run_sweep(circuit, params, repetitions)
The simulation performs optimizations if the number of repetitions is
greater than one and all measurements in the circuit are terminal (at the
end of the circuit). These methods return `Result`s which contain both
the measurement results, but also the parameters used for the parameterized
circuit operations. The initial state of a run is always the all 0s state
in the computational basis.
By contrast the simulate methods of the simulator give access to the
state vector of the simulation at the end of the simulation of the circuit.
These methods take in two parameters that the run methods do not: a
qubit order and an initial state. The qubit order is necessary because an
ordering must be chosen for the kronecker product (see
`SparseSimulationTrialResult` for details of this ordering). The initial
state can be either the full state vector, or an integer which represents
the initial state of being in a computational basis state for the binary
representation of that integer. Similar to run methods, there are two
simulate methods that run for single runs or for sweeps across different
parameters:
simulate(circuit, param_resolver, qubit_order, initial_state)
simulate_sweep(circuit, params, qubit_order, initial_state)
The simulate methods in contrast to the run methods do not perform
repetitions. The result of these simulations is a
`SparseSimulationTrialResult` which contains, in addition to measurement
results and information about the parameters that were used in the
simulation, access to the state via the `state` method and `StateVectorMixin`
methods.
If one wishes to perform simulations that have access to the
state vector as one steps through running the circuit there is a generator
which can be iterated over and each step is an object that gives access
to the state vector. This stepping through a `Circuit` is done on a
`Moment` by `Moment` manner.
simulate_moment_steps(circuit, param_resolver, qubit_order,
initial_state)
One can iterate over the moments via
        for step_result in simulate_moment_steps(circuit):
# do something with the state vector via step_result.state_vector
Note also that simulations can be stochastic, i.e. return different results
for different runs. The first version of this occurs for measurements,
where the results of the measurement are recorded. This can also
occur when the circuit has mixtures of unitaries.
If only the expectation values for some observables on the final state are
required, there are methods for that as well. These methods take a mapping
of names to observables, and return a map (or list of maps) of those names
to the corresponding expectation values.
simulate_expectation_values(circuit, observables, param_resolver,
qubit_order, initial_state,
permit_terminal_measurements)
simulate_expectation_values_sweep(circuit, observables, params,
qubit_order, initial_state,
permit_terminal_measurements)
Expectation values generated by these methods are exact (up to precision of
the floating-point type used); the closest analogy on hardware requires
estimating the expectation values from several samples.
See `Simulator` for the definitions of the supported methods.
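A minimal usage sketch (illustrative only: the qubit names, the measurement
key and the repetition count below are assumptions, not part of this
module):
        import cirq
        q0, q1 = cirq.LineQubit.range(2)
        circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1),
                               cirq.measure(q0, q1, key='m'))
        result = cirq.Simulator().run(circuit, repetitions=10)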
"""
def __init__(
self,
*,
dtype: Type[np.number] = np.complex64,
noise: 'cirq.NOISE_MODEL_LIKE' = None,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
split_untangled_states: bool = True,
):
"""A sparse matrix simulator.
Args:
dtype: The `numpy.dtype` used by the simulation. One of
`numpy.complex64` or `numpy.complex128`.
noise: A noise model to apply while simulating.
seed: The random seed to use for this simulator.
split_untangled_states: If True, optimizes simulation by running
unentangled qubit sets independently and merging those states
at the end.
Raises:
ValueError: If the given dtype is not complex.
"""
if np.dtype(dtype).kind != 'c':
raise ValueError(f'dtype must be a complex type but was {dtype}')
super().__init__(
dtype=dtype,
noise=noise,
seed=seed,
split_untangled_states=split_untangled_states,
)
def _create_partial_act_on_args(
self,
initial_state: Union['cirq.STATE_VECTOR_LIKE', 'cirq.ActOnStateVectorArgs'],
qubits: Sequence['cirq.Qid'],
logs: Dict[str, Any],
):
"""Creates the ActOnStateVectorArgs for a circuit.
Args:
initial_state: The initial state for the simulation in the
computational basis.
qubits: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
logs: Log of the measurement results.
Returns:
ActOnStateVectorArgs for the circuit.
"""
if isinstance(initial_state, act_on_state_vector_args.ActOnStateVectorArgs):
return initial_state
qid_shape = protocols.qid_shape(qubits)
state = qis.to_valid_state_vector(
initial_state, len(qubits), qid_shape=qid_shape, dtype=self._dtype
)
return act_on_state_vector_args.ActOnStateVectorArgs(
target_tensor=np.reshape(state, qid_shape),
available_buffer=np.empty(qid_shape, dtype=self._dtype),
qubits=qubits,
prng=self._prng,
log_of_measurement_results=logs,
)
def _create_step_result(
self,
sim_state: 'cirq.OperationTarget[cirq.ActOnStateVectorArgs]',
):
return SparseSimulatorStep(
sim_state=sim_state,
simulator=self,
dtype=self._dtype,
)
def simulate_expectation_values_sweep_iter(
self,
program: 'cirq.AbstractCircuit',
observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
params: 'cirq.Sweepable',
qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
initial_state: Any = None,
permit_terminal_measurements: bool = False,
) -> Iterator[List[float]]:
if not permit_terminal_measurements and program.are_any_measurements_terminal():
raise ValueError(
'Provided circuit has terminal measurements, which may '
'skew expectation values. If this is intentional, set '
'permit_terminal_measurements=True.'
)
qubit_order = ops.QubitOrder.as_qubit_order(qubit_order)
qmap = {q: i for i, q in enumerate(qubit_order.order_for(program.all_qubits()))}
if not isinstance(observables, List):
observables = [observables]
pslist = [ops.PauliSum.wrap(pslike) for pslike in observables]
yield from (
[obs.expectation_from_state_vector(result.final_state_vector, qmap) for obs in pslist]
for result in self.simulate_sweep_iter(
program, params, qubit_order=qubit_order, initial_state=initial_state
)
)
class SparseSimulatorStep(
state_vector.StateVectorMixin,
state_vector_simulator.StateVectorStepResult,
):
"""A `StepResult` that includes `StateVectorMixin` methods."""
def __init__(
self,
sim_state: 'cirq.OperationTarget[cirq.ActOnStateVectorArgs]',
simulator: 'cirq.Simulator' = None,
dtype: 'DTypeLike' = np.complex64,
):
"""Results of a step of the simulator.
Args:
sim_state: The qubit:ActOnArgs lookup for this step.
simulator: The simulator used to create this.
dtype: The `numpy.dtype` used by the simulation. One of
`numpy.complex64` or `numpy.complex128`.
"""
qubit_map = {q: i for i, q in enumerate(sim_state.qubits)}
super().__init__(sim_state=sim_state, qubit_map=qubit_map)
self._dtype = dtype
self._state_vector: Optional[np.ndarray] = None
self._simulator = simulator
def _simulator_state(self) -> 'cirq.StateVectorSimulatorState':
return state_vector_simulator.StateVectorSimulatorState(
qubit_map=self.qubit_map, state_vector=self.state_vector(copy=False)
)
def state_vector(self, copy: bool = True):
"""Return the state vector at this point in the computation.
The state is returned in the computational basis with these basis
states defined by the qubit_map. In particular the value in the
qubit_map is the index of the qubit, and these are translated into
binary vectors where the last qubit is the 1s bit of the index, the
second-to-last is the 2s bit of the index, and so forth (i.e. big
endian ordering).
Example:
qubit_map: {QubitA: 0, QubitB: 1, QubitC: 2}
Then the returned vector will have indices mapped to qubit basis
states like the following table
| | QubitA | QubitB | QubitC |
| :-: | :----: | :----: | :----: |
| 0 | 0 | 0 | 0 |
| 1 | 0 | 0 | 1 |
| 2 | 0 | 1 | 0 |
| 3 | 0 | 1 | 1 |
| 4 | 1 | 0 | 0 |
| 5 | 1 | 0 | 1 |
| 6 | 1 | 1 | 0 |
| 7 | 1 | 1 | 1 |
Args:
copy: If True, then the returned state is a copy of the state
vector. If False, then the state vector is not copied,
potentially saving memory. If one only needs to read derived
                parameters from the state vector and store them, then using
                False can speed up simulation by eliminating a memory copy.
"""
if self._state_vector is None:
self._state_vector = np.array([1])
state = self._merged_sim_state
if state is not None:
vector = state.target_tensor
size = np.prod(vector.shape, dtype=np.int64)
self._state_vector = np.reshape(vector, size)
return self._state_vector.copy() if copy else self._state_vector
def set_state_vector(self, state: 'cirq.STATE_VECTOR_LIKE'):
"""Set the state vector.
One can pass a valid full state to this method by passing a numpy
array. Or, alternatively, one can pass an integer, and then the state
will be set to lie entirely in the computation basis state for the
binary expansion of the passed integer.
Args:
state: If an int, the state vector set is the state vector
corresponding to a computational basis state. If a numpy
array this is the full state vector.
"""
if self._simulator:
self._sim_state = self._simulator._create_act_on_args(state, self._qubits)
def __repr__(self) -> str:
return (
f'cirq.SparseSimulatorStep(sim_state={self._sim_state!r},'
f' dtype=np.{self._dtype.__name__})'
)
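# A minimal usage sketch, not part of this module (assumes the full `cirq`
# package is importable; the circuit below is illustrative only):
#
#   qs = cirq.LineQubit.range(2)
#   circuit = cirq.Circuit([cirq.H(qs[0]), cirq.CNOT(qs[0], qs[1])])
#   result = cirq.Simulator(seed=0).simulate(circuit)
#   # result.final_state_vector follows the big-endian qubit ordering
#   # documented in SparseSimulatorStep.state_vector above.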
| 41.576471 | 98 | 0.653509 |
4a1fd610012ad54cce222cd1775448a1288cd4fc | 377 | py | Python | RiskQuantLib/Property/average.py | SyuyaMurakami/RiskQuantLib-Doc | 2503befc24c2e422e51f8b9f468c8d8439e11c65 | [
"MIT"
] | 1 | 2021-12-29T12:18:45.000Z | 2021-12-29T12:18:45.000Z | RiskQuantLib/Property/average.py | SyuyaMurakami/RiskQuantLib-Doc | 2503befc24c2e422e51f8b9f468c8d8439e11c65 | [
"MIT"
] | null | null | null | RiskQuantLib/Property/average.py | SyuyaMurakami/RiskQuantLib-Doc | 2503befc24c2e422e51f8b9f468c8d8439e11c65 | [
"MIT"
] | 1 | 2021-12-08T02:14:34.000Z | 2021-12-08T02:14:34.000Z | #!/usr/bin/python
#coding = utf-8
class average:
"""
This is the base class of average. Inherit from this class will make
attribute into the average version.
"""
def __init__(self,numberOfSamplesNum):
self.numberOfSamples = numberOfSamplesNum
def setNumberOfSamples(self,numberOfSamplesNum):
self.numberOfSamples = numberOfSamplesNum
| 25.133333 | 72 | 0.713528 |
4a1fd6204590cd10b6faa498624ae943476c1b47 | 14,632 | py | Python | pysat/pb.py | mvcisback/pysat | 5326a96af2bac1804c7e173f6d7d01f42e86ab4f | [
"MIT"
] | null | null | null | pysat/pb.py | mvcisback/pysat | 5326a96af2bac1804c7e173f6d7d01f42e86ab4f | [
"MIT"
] | null | null | null | pysat/pb.py | mvcisback/pysat | 5326a96af2bac1804c7e173f6d7d01f42e86ab4f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8 -*-
##
## pb.py
##
## Created on: Mar 13, 2019
## Author: Alexey S. Ignatiev
## E-mail: [email protected]
##
"""
===============
List of classes
===============
.. autosummary::
:nosignatures:
EncType
PBEnc
==================
Module description
==================
This module provides access to the basic functionality of the `PyPBLib
library <https://pypi.org/project/pypblib/>`__ developed by the `Logic
Optimization Group <http://ulog.udl.cat/>`__ of the University of Lleida.
PyPBLib provides a user with an extensive Python API to the well-known
`PBLib library <http://tools.computational-logic.org/content/pblib.php>`__
[1]_. Note the PyPBLib has a number of `additional features
<http://hardlog.udl.cat/static/doc/pypblib/html/index.html>`__ that cannot
be accessed through PySAT *at this point*. (One concrete example is a
range of cardinality encodings, which clash with the internal
:mod:`pysat.card` module.) If a user needs some functionality of PyPBLib
missing in this module, he/she may apply PyPBLib as a standalone library,
when working with PySAT.
.. [1] Tobias Philipp, Peter Steinke. *PBLib - A Library for Encoding
Pseudo-Boolean Constraints into CNF*. SAT 2015. pp. 9-16
A *pseudo-Boolean constraint* is a constraint of the form:
:math:`\left(\sum_{i=1}^n{a_i\cdot x_i}\\right)\circ k`, where
:math:`a_i\in\mathbb{N}`, :math:`x_i\in\{y_i,\\neg{y_i}\}`,
:math:`y_i\in\mathbb{B}`, and :math:`\circ\in\{\leq,=,\geq\}`.
Pseudo-Boolean constraints arise in a number of important practical
applications. Thus, several *encodings* of pseudo-Boolean constraints into
CNF formulas are known [2]_. The list of pseudo-Boolean encodings
supported by this module include BDD [3]_ [4]_, sequential weight counters
[5]_, sorting networks [3]_, adder networks [3]_, and binary merge [6]_.
Access to all cardinality encodings can be made through the main class of
this module, which is :class:`.PBEnc`.
.. [2] Olivier Roussel, Vasco M. Manquinho. *Pseudo-Boolean and
Cardinality Constraints*. Handbook of Satisfiability. 2009.
pp. 695-733
.. [3] Niklas Eén, Niklas Sörensson. *Translating Pseudo-Boolean
Constraints into SAT*. JSAT. vol. 2(1-4). 2006. pp. 1-26
.. [4] Ignasi Abío, Robert Nieuwenhuis, Albert Oliveras,
Enric Rodríguez-Carbonell. *BDDs for Pseudo-Boolean Constraints -
Revisited*. SAT. 2011. pp. 61-75
.. [5] Steffen Hölldobler, Norbert Manthey, Peter Steinke. *A Compact
Encoding of Pseudo-Boolean Constraints into SAT*. KI. 2012.
pp. 107-118
.. [6] Norbert Manthey, Tobias Philipp, Peter Steinke. *A More Compact
Translation of Pseudo-Boolean Constraints into CNF Such That
Generalized Arc Consistency Is Maintained*. KI. 2014. pp. 123-134
==============
Module details
==============
"""
#
#==============================================================================
import math
from pypblib import pblib
from pysat.formula import CNF
#
#==============================================================================
class NoSuchEncodingError(Exception):
"""
This exception is raised when creating an unknown LEQ, GEQ, or Equals
constraint encoding.
"""
pass
#
#==============================================================================
class EncType(object):
"""
This class represents a C-like ``enum`` type for choosing the
pseudo-Boolean encoding to use. The values denoting the encodings are:
::
best = 0
bdd = 1
seqcounter = 2
sortnetwrk = 3
adder = 4
binmerge = 5
The desired encoding can be selected either directly by its integer
identifier, e.g. ``2``, or by its alphabetical name, e.g.
``EncType.seqcounter``.
All the encodings are produced and returned as a list of clauses in
the :class:`pysat.formula.CNF` format.
Note that the encoding type can be set to ``best``, in which case the
encoder selects one of the other encodings from the list (in most
cases, this invokes the ``bdd`` encoder).
"""
best = 0
bdd = 1
seqcounter = 2
sortnetwrk = 3
adder = 4
binmerge = 5
# mapping from internal encoding identifiers to the ones of PyPBLib
_to_pbenc = {
best: pblib.PB_BEST,
bdd: pblib.PB_BDD,
seqcounter: pblib.PB_SWC,
sortnetwrk: pblib.PB_SORTINGNETWORKS,
adder: pblib.PB_ADDER,
binmerge: pblib.PB_BINARY_MERGE
}
# mapping from internal comparator identifiers to the ones of PyPBLib
_to_pbcmp = {
'<': pblib.LEQ,
'>': pblib.GEQ,
'=': pblib.BOTH
}
#
#==============================================================================
class PBEnc(object):
"""
Abstract class responsible for the creation of pseudo-Boolean
constraints encoded to a CNF formula. The class has three main *class
methods* for creating LEQ, GEQ, and Equals constraints. Given (1)
either a list of weighted literals or a list of unweighted literals
followed by a list of weights, (2) an integer bound and an encoding
type, each of these methods returns an object of class
:class:`pysat.formula.CNF` representing the resulting CNF formula.
Since the class is abstract, there is no need to create an object of
it. Instead, the methods should be called directly as class methods,
e.g. ``PBEnc.atmost(wlits, bound)`` or ``PBEnc.equals(lits, weights,
bound)``. An example usage is the following:
.. code-block:: python
>>> from pysat.pb import *
>>> cnf = PBEnc.atmost(lits=[1, 2, 3], weights=[1, 2, 3], bound=3)
            >>> print(cnf.clauses)
[[4], [-1, -5], [-2, -5], [5, -3, -6], [6]]
>>> cnf = PBEnc.equals(lits=[1, 2, 3], weights=[1, 2, 3], bound=3, encoding=EncType.bdd)
            >>> print(cnf.clauses)
[[4], [-5, -2], [-5, 2, -1], [-5, -1], [-6, 1], [-7, -2, 6], [-7, 2], [-7, 6], [-8, -3, 5], [-8, 3, 7], [-8, 5, 7], [8]]
"""
@classmethod
def _update_vids(cls, cnf, vpool):
"""
Update variable ids in the given formula and id pool.
        :param cnf: a formula whose variable ids should be updated.
        :param vpool: the pool of variable identifiers.
:type cnf: :class:`.formula.CNF`
:type vpool: :class:`.formula.IDPool`
"""
top, vmap = vpool.top, {} # current top and variable mapping
# creating a new variable mapping, taking into
# account variables marked as "occupied"
while top < cnf.nv:
top += 1
vpool.top += 1
while vpool._occupied and vpool.top >= vpool._occupied[0][0]:
if vpool.top <= vpool._occupied[0][1] + 1:
vpool.top = vpool._occupied[0][1] + 1
vpool._occupied.pop(0)
vmap[top] = vpool.top
# updating the clauses
for cl in cnf.clauses:
cl[:] = map(lambda l: int(math.copysign(vmap[abs(l)], l)) if abs(l) in vmap else l, cl)
# updating the number of variables
cnf.nv = vpool.top
@classmethod
def _encode(cls, lits, weights=None, bound=1, top_id=None, vpool=None,
encoding=EncType.best, comparator='<'):
"""
This is the method that wraps the encoder of PyPBLib. Although the
method can be invoked directly, a user is expected to call one of
the following methods instead: :meth:`atmost`, :meth:`atleast`, or
:meth:`equals`.
The list of literals can contain either integers or pairs ``(l,
w)``, where ``l`` is an integer literal and ``w`` is an integer
weight. The latter can be done only if no ``weights`` are
specified separately.
:param lits: a list of literals in the sum.
:param weights: a list of weights
:param bound: the value of bound :math:`k`.
:param top_id: top variable identifier used so far.
:param vpool: variable pool for counting the number of variables.
:param encoding: identifier of the encoding to use.
:param comparator: identifier of the comparison operator
:type lits: iterable(int)
:type weights: iterable(int)
:type bound: int
:type top_id: integer or None
:type vpool: :class:`.IDPool`
:type encoding: integer
:type comparator: str
:rtype: :class:`pysat.formula.CNF`
"""
if encoding < 0 or encoding > 5:
raise(NoSuchEncodingError(encoding))
assert lits, 'No literals are provided.'
assert not top_id or not vpool, \
'Use either a top id or a pool of variables but not both.'
# preparing weighted literals
if weights:
assert len(lits) == len(weights), 'Same number of literals and weights is expected.'
wlits = [pblib.WeightedLit(l, w) for l, w in zip(lits, weights)]
else:
if all(map(lambda lw: (type(lw) in (list, tuple)) and len(lw) == 2, lits)):
# literals are already weighted
wlits = [pblib.WeightedLit(*wl) for wl in lits]
                lits = list(zip(*lits))[0]  # unweighted literals for getting top_id
elif all(map(lambda l: type(l) is int, lits)):
# no weights are provided => all weights are units
wlits = [pblib.WeightedLit(l, 1) for l in lits]
else:
assert 0, 'Incorrect literals given.'
# obtaining the top id from the variable pool
if vpool:
top_id = vpool.top
if not top_id:
top_id = max(map(lambda x: abs(x), lits))
# pseudo-Boolean constraint and variable manager
constr = pblib.PBConstraint(wlits, EncType._to_pbcmp[comparator], bound)
varmgr = pblib.AuxVarManager(top_id + 1)
# encoder configuration
config = pblib.PBConfig()
config.set_PB_Encoder(EncType._to_pbenc[encoding])
# encoding
result = pblib.VectorClauseDatabase(config)
pb2cnf = pblib.Pb2cnf(config)
pb2cnf.encode(constr, result, varmgr)
# extracting clauses
ret = CNF(from_clauses=result.get_clauses())
# updating vpool if necessary
if vpool:
if vpool._occupied and vpool.top <= vpool._occupied[0][0] <= ret.nv:
cls._update_vids(ret, vpool)
else:
vpool.top = ret.nv - 1
vpool._next()
return ret
@classmethod
def leq(cls, lits, weights=None, bound=1, top_id=None, vpool=None,
encoding=EncType.best):
"""
This method can be used for creating a CNF encoding of a LEQ
(weighted AtMostK) constraint, i.e. of
:math:`\sum_{i=1}^{n}{a_i\cdot x_i}\leq k`. The resulting set of
clauses is returned as an object of class :class:`.formula.CNF`.
The input list of literals can contain either integers or pairs
``(l, w)``, where ``l`` is an integer literal and ``w`` is an
integer weight. The latter can be done only if no ``weights`` are
specified separately. The type of encoding to use can be specified
using the ``encoding`` parameter. By default, it is set to
``EncType.best``, i.e. it is up to the PBLib encoder to choose the
encoding type.
:param lits: a list of literals in the sum.
:param weights: a list of weights
:param bound: the value of bound :math:`k`.
:param top_id: top variable identifier used so far.
:param vpool: variable pool for counting the number of variables.
:param encoding: identifier of the encoding to use.
:type lits: iterable(int)
:type weights: iterable(int)
:type bound: int
:type top_id: integer or None
:type vpool: :class:`.IDPool`
:type encoding: integer
:rtype: :class:`pysat.formula.CNF`
"""
return cls._encode(lits, weights=weights, bound=bound, top_id=top_id,
vpool=vpool, encoding=encoding, comparator='<')
@classmethod
def atmost(cls, lits, weights=None, bound=1, top_id=None, vpool=None,
encoding=EncType.best):
"""
        A synonym for :meth:`PBEnc.leq`.
"""
return cls.leq(lits, weights=weights, bound=bound, top_id=top_id,
vpool=vpool, encoding=encoding)
@classmethod
def geq(cls, lits, weights=None, bound=1, top_id=None, vpool=None,
encoding=EncType.best):
"""
This method can be used for creating a CNF encoding of a GEQ
(weighted AtLeastK) constraint, i.e. of
:math:`\sum_{i=1}^{n}{a_i\cdot x_i}\geq k`. The method shares the
arguments and the return type with method :meth:`PBEnc.leq`.
Please, see it for details.
"""
return cls._encode(lits, weights=weights, bound=bound, top_id=top_id,
vpool=vpool, encoding=encoding, comparator='>')
@classmethod
def atleast(cls, lits, weights=None, bound=1, top_id=None, vpool=None,
encoding=EncType.best):
"""
A synonym for :meth:`PBEnc.geq`.
"""
return cls.geq(lits, weights=weights, bound=bound, top_id=top_id,
vpool=vpool, encoding=encoding)
@classmethod
def equals(cls, lits, weights=None, bound=1, top_id=None, vpool=None,
encoding=EncType.best):
"""
This method can be used for creating a CNF encoding of a weighted
EqualsK constraint, i.e. of :math:`\sum_{i=1}^{n}{a_i\cdot x_i}=
k`. The method shares the arguments and the return type with
method :meth:`PBEnc.leq`. Please, see it for details.
"""
return cls._encode(lits, weights=weights, bound=bound, top_id=top_id,
vpool=vpool, encoding=encoding, comparator='=')
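# A minimal usage sketch, assuming `pypblib` is installed (the weights and
# bound below are illustrative only):
if __name__ == '__main__':
    # Encode 2*x1 + 3*x2 + 5*x3 <= 5 with the sequential weight counter.
    cnf = PBEnc.leq(lits=[1, 2, 3], weights=[2, 3, 5], bound=5,
                    encoding=EncType.seqcounter)
    print(cnf.clauses)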
| 38.104167 | 132 | 0.576886 |
4a1fd6abc4048fe2ccb81204b575ac88467742d3 | 2,116 | py | Python | yql/api/_api_request.py | sourcepirate/yql | 16ebf3674cef74e0b15e026dbcb69d500e3cafd9 | [
"MIT"
] | 1 | 2015-10-26T09:26:14.000Z | 2015-10-26T09:26:14.000Z | yql/api/_api_request.py | plasmashadow/yql | 16ebf3674cef74e0b15e026dbcb69d500e3cafd9 | [
"MIT"
] | 2 | 2015-09-17T03:04:42.000Z | 2016-11-27T03:34:31.000Z | yql/api/_api_request.py | sourcepirate/yql | 16ebf3674cef74e0b15e026dbcb69d500e3cafd9 | [
"MIT"
] | 4 | 2015-08-24T11:25:14.000Z | 2016-11-10T04:30:29.000Z |
from requests import Session
from yql._builder import _YQLBuilder
from ._api_response import _Api_Response
from six.moves.urllib.parse import urlencode
import json
_yahoo_api = 'https://query.yahooapis.com/v1/public/yql'
class _Api_Request(Session):
'''
    A Request object wrapping a `requests` Session, used to
    build and execute YQL queries.
'''
def __init__(self, *args, **kwargs):
"""
        The object consists of a table name and a query builder,
        which are initialised for constructing the queries.
"""
self.__tablename = kwargs.pop('table', None)
self.__yql = _YQLBuilder(self.__tablename)
super(_Api_Request, self).__init__(*args, **kwargs)
def add_filter(self, name, value):
        '''Adds a filter to the request.
Args:
name(str): name of the yql column
value(str, int, float): the value to which it should be matched against
Returns:
the current reference for chaining.
'''
self.__yql.filter(name, value)
return self
def get(self, column):
        '''Used to query a given column from yql.
        Args:
            column(str): name of the column to query with particular filters.
                If this method is never invoked, the entire column set "*"
                of yql is queried.
        Returns:
            the current reference for chaining.
        '''
        self.__yql.get(column)
return self
@property
def result(self):
url = self.__yql._construct()
url = _yahoo_api+"?"+ urlencode(dict(q=url))
response = super(_Api_Request, self).get(url)
return _Api_Response(response)
def json(self):
url = self.__yql._construct()
url = _yahoo_api+"?"+ urlencode(dict(q=url, format="json"))
response = super(_Api_Request, self).get(url)
return _Api_Response(response)
def xml(self):
url = self.__yql._construct()
url = _yahoo_api+"?"+ urlencode(dict(q=url, format="xml"))
response = super(_Api_Request, self).get(url)
return _Api_Response(response)
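# Hypothetical usage sketch -- the table and filter values below are
# placeholders, not part of this module:
#
#   req = _Api_Request(table='weather.forecast')
#   data = req.add_filter('woeid', 2487889).json()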
| 25.804878 | 82 | 0.61673 |
4a1fd761923dcb78f9dbc0ca829482151609465b | 3,371 | py | Python | skl2onnx/operator_converters/decision_tree.py | vinitra-zz/sklearn-onnx | a8f2657525d0b4dd279bcd1a971397d002929a77 | [
"MIT"
] | null | null | null | skl2onnx/operator_converters/decision_tree.py | vinitra-zz/sklearn-onnx | a8f2657525d0b4dd279bcd1a971397d002929a77 | [
"MIT"
] | null | null | null | skl2onnx/operator_converters/decision_tree.py | vinitra-zz/sklearn-onnx | a8f2657525d0b4dd279bcd1a971397d002929a77 | [
"MIT"
] | 1 | 2020-10-01T09:26:27.000Z | 2020-10-01T09:26:27.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import numbers
import numpy as np
import six
from ..common._apply_operation import apply_cast
from ..common.data_types import Int64TensorType
from ..common._registration import register_converter
from ..common.tree_ensemble import add_tree_to_attribute_pairs
from ..common.tree_ensemble import get_default_tree_classifier_attribute_pairs
from ..common.tree_ensemble import get_default_tree_regressor_attribute_pairs
from ..common.utils_classifier import get_label_classes
from ..proto import onnx_proto
def convert_sklearn_decision_tree_classifier(scope, operator, container):
op = operator.raw_operator
op_type = 'TreeEnsembleClassifier'
attrs = get_default_tree_classifier_attribute_pairs()
attrs['name'] = scope.get_unique_operator_name(op_type)
classes = get_label_classes(scope, op)
if all(isinstance(i, np.ndarray) for i in classes):
classes = np.concatenate(classes)
if all(isinstance(i, (numbers.Real, bool, np.bool_)) for i in classes):
class_labels = [int(i) for i in classes]
attrs['classlabels_int64s'] = class_labels
elif all(isinstance(i, (six.string_types, six.text_type))
for i in classes):
class_labels = [str(i) for i in classes]
attrs['classlabels_strings'] = class_labels
else:
raise ValueError('Labels must be all integers or all strings.')
add_tree_to_attribute_pairs(attrs, True, op.tree_, 0, 1., 0, True,
True, dtype=container.dtype)
container.add_node(
op_type, operator.input_full_names,
[operator.outputs[0].full_name, operator.outputs[1].full_name],
op_domain='ai.onnx.ml', **attrs)
def convert_sklearn_decision_tree_regressor(scope, operator, container):
op = operator.raw_operator
op_type = 'TreeEnsembleRegressor'
attrs = get_default_tree_regressor_attribute_pairs()
attrs['name'] = scope.get_unique_operator_name(op_type)
attrs['n_targets'] = int(op.n_outputs_)
add_tree_to_attribute_pairs(attrs, False, op.tree_, 0, 1., 0, False,
True, dtype=container.dtype)
input_name = operator.input_full_names
if type(operator.inputs[0].type) == Int64TensorType:
cast_input_name = scope.get_unique_variable_name('cast_input')
apply_cast(scope, operator.input_full_names, cast_input_name,
container, to=onnx_proto.TensorProto.FLOAT)
input_name = [cast_input_name]
container.add_node(op_type, input_name,
operator.output_full_names, op_domain='ai.onnx.ml',
**attrs)
register_converter('SklearnDecisionTreeClassifier',
convert_sklearn_decision_tree_classifier)
register_converter('SklearnDecisionTreeRegressor',
convert_sklearn_decision_tree_regressor)
register_converter('SklearnExtraTreeClassifier',
convert_sklearn_decision_tree_classifier)
register_converter('SklearnExtraTreeRegressor',
convert_sklearn_decision_tree_regressor)
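# Context sketch: these converters are not called directly; they are invoked
# through the public entry point (the model and shapes below are illustrative):
#
#   from skl2onnx import convert_sklearn
#   from skl2onnx.common.data_types import FloatTensorType
#   onx = convert_sklearn(model, initial_types=[('input', FloatTensorType([None, 4]))])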
| 41.617284 | 78 | 0.690003 |
4a1fd989d452e7603a30dba0225b62e0c2311340 | 11,304 | py | Python | Char5 PPO/PPO_agent.py | rh01/Deep-reinforcement-learning-with-pytorch | fd1853495b885514927c82834f562d2a4df06b28 | [
"MIT"
] | null | null | null | Char5 PPO/PPO_agent.py | rh01/Deep-reinforcement-learning-with-pytorch | fd1853495b885514927c82834f562d2a4df06b28 | [
"MIT"
] | null | null | null | Char5 PPO/PPO_agent.py | rh01/Deep-reinforcement-learning-with-pytorch | fd1853495b885514927c82834f562d2a4df06b28 | [
"MIT"
] | 2 | 2021-05-25T06:40:50.000Z | 2021-09-20T12:38:55.000Z | # coding:utf-8
# PPO implement for war_game
import torch
import torch.nn as nn
from torch import optim
from torch.optim import Adam
from torch.nn import functional as F
from tensorboardX import SummaryWriter
from torch.distributions import Categorical
from torch.nn.functional import smooth_l1_loss
import sys,os,scipy.signal,threading,time,random,copy
from datetime import datetime  # used by the progress logging in learn()
import numpy as np
sys.path.append('../../pythonModules')
from pythonModules import wgenv,common,wgfeature,wgsdata
from pythonModules.resnet_model import ResNet
from running_state import ZFilter
from utils import select_actions, evaluate_actions
# Hyper-parameters
LEARNING_RATE = 0.001
class ppo_agent:
def __init__(self, envs, args, net, env_type='atari'):
self.envs = envs
self.args = args
self.env_type = env_type
# define the newtork...
self.net = net
self.old_net = copy.deepcopy(self.net)
# if use the cuda...
if self.args.cuda:
self.net.cuda()
self.old_net.cuda()
# define the optimizer...
self.optimizer = optim.Adam(self.net.parameters(), self.args.lr, eps=self.args.eps)
# running filter...
if self.env_type == 'mujoco':
num_states = self.envs.observation_space.shape[0]
self.running_state = ZFilter((num_states, ), clip=5)
# check saving folder..
if not os.path.exists(self.args.save_dir):
os.mkdir(self.args.save_dir)
# env folder..
self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
if not os.path.exists(self.model_path):
os.mkdir(self.model_path)
# get the observation
self.batch_ob_shape = (self.args.num_workers * self.args.nsteps, ) + self.envs.observation_space.shape
self.obs = np.zeros((self.args.num_workers, ) + self.envs.observation_space.shape, dtype=self.envs.observation_space.dtype.name)
if self.env_type == 'mujoco':
self.obs[:] = np.expand_dims(self.running_state(self.envs.reset()), 0)
else:
self.obs[:] = self.envs.reset()
self.dones = [False for _ in range(self.args.num_workers)]
# start to train the network...
def learn(self):
num_updates = self.args.total_frames // (self.args.nsteps * self.args.num_workers)
# get the reward to calculate other informations
episode_rewards = torch.zeros([self.args.num_workers, 1])
final_rewards = torch.zeros([self.args.num_workers, 1])
for update in range(num_updates):
mb_obs, mb_rewards, mb_actions, mb_dones, mb_values = [], [], [], [], []
for step in range(self.args.nsteps):
with torch.no_grad():
# get tensors
obs_tensor = self._get_tensors(self.obs)
values, pis = self.net(obs_tensor)
# select actions
actions = select_actions(pis, self.args.dist, self.env_type)
if self.env_type == 'atari':
input_actions = actions
else:
if self.args.dist == 'gauss':
input_actions = actions.copy()
elif self.args.dist == 'beta':
input_actions = -1 + 2 * actions
# start to store information
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_dones.append(self.dones)
mb_values.append(values.detach().cpu().numpy().squeeze())
# start to excute the actions in the environment
obs, rewards, dones, _ = self.envs.step(input_actions)
# update dones
if self.env_type == 'mujoco':
dones = np.array([dones])
rewards = np.array([rewards])
self.dones = dones
mb_rewards.append(rewards)
# clear the observation
for n, done in enumerate(dones):
if done:
self.obs[n] = self.obs[n] * 0
if self.env_type == 'mujoco':
# reset the environment
obs = self.envs.reset()
self.obs = obs if self.env_type == 'atari' else np.expand_dims(self.running_state(obs), 0)
# process the rewards part -- display the rewards on the screen
rewards = torch.tensor(np.expand_dims(np.stack(rewards), 1), dtype=torch.float32)
episode_rewards += rewards
masks = torch.tensor([[0.0] if done_ else [1.0] for done_ in dones], dtype=torch.float32)
final_rewards *= masks
final_rewards += (1 - masks) * episode_rewards
episode_rewards *= masks
# process the rollouts
mb_obs = np.asarray(mb_obs, dtype=np.float32)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions, dtype=np.float32)
            mb_dones = np.asarray(mb_dones, dtype=bool)  # np.bool is removed in newer numpy
mb_values = np.asarray(mb_values, dtype=np.float32)
if self.env_type == 'mujoco':
mb_values = np.expand_dims(mb_values, 1)
# compute the last state value
with torch.no_grad():
obs_tensor = self._get_tensors(self.obs)
last_values, _ = self.net(obs_tensor)
last_values = last_values.detach().cpu().numpy().squeeze()
# start to compute advantages...
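            # Generalized Advantage Estimation (GAE), as computed in the loop below:
            #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done) - V(s_t)
            #   A_t     = delta_t + gamma * tau * (1 - done) * A_{t+1}
            # and the value targets are returns_t = A_t + V(s_t).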
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.args.nsteps)):
if t == self.args.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t + 1]
nextvalues = mb_values[t + 1]
delta = mb_rewards[t] + self.args.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.args.gamma * self.args.tau * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
# after compute the returns, let's process the rollouts
mb_obs = mb_obs.swapaxes(0, 1).reshape(self.batch_ob_shape)
if self.env_type == 'atari':
mb_actions = mb_actions.swapaxes(0, 1).flatten()
mb_returns = mb_returns.swapaxes(0, 1).flatten()
mb_advs = mb_advs.swapaxes(0, 1).flatten()
# before update the network, the old network will try to load the weights
self.old_net.load_state_dict(self.net.state_dict())
# start to update the network
pl, vl, ent = self._update_network(mb_obs, mb_actions, mb_returns, mb_advs)
# display the training information
if update % self.args.display_interval == 0:
print('[{}] Update: {} / {}, Frames: {}, Rewards: {:.3f}, Min: {:.3f}, Max: {:.3f}, PL: {:.3f},'\
'VL: {:.3f}, Ent: {:.3f}'.format(datetime.now(), update, num_updates, (update + 1)*self.args.nsteps*self.args.num_workers, \
final_rewards.mean().item(), final_rewards.min().item(), final_rewards.max().item(), pl, vl, ent))
# save the model
if self.env_type == 'atari':
torch.save(self.net.state_dict(), self.model_path + '/model.pt')
else:
# for the mujoco, we also need to keep the running mean filter!
torch.save([self.net.state_dict(), self.running_state], self.model_path + '/model.pt')
# update the network
def _update_network(self, obs, actions, returns, advantages):
inds = np.arange(obs.shape[0])
nbatch_train = obs.shape[0] // self.args.batch_size
for _ in range(self.args.epoch):
np.random.shuffle(inds)
for start in range(0, obs.shape[0], nbatch_train):
# get the mini-batchs
end = start + nbatch_train
mbinds = inds[start:end]
mb_obs = obs[mbinds]
mb_actions = actions[mbinds]
mb_returns = returns[mbinds]
mb_advs = advantages[mbinds]
# convert minibatches to tensor
mb_obs = self._get_tensors(mb_obs)
mb_actions = torch.tensor(mb_actions, dtype=torch.float32)
mb_returns = torch.tensor(mb_returns, dtype=torch.float32).unsqueeze(1)
mb_advs = torch.tensor(mb_advs, dtype=torch.float32).unsqueeze(1)
# normalize adv
mb_advs = (mb_advs - mb_advs.mean()) / (mb_advs.std() + 1e-8)
if self.args.cuda:
mb_actions = mb_actions.cuda()
mb_returns = mb_returns.cuda()
mb_advs = mb_advs.cuda()
# start to get values
mb_values, pis = self.net(mb_obs)
# start to calculate the value loss...
value_loss = (mb_returns - mb_values).pow(2).mean()
# start to calculate the policy loss
with torch.no_grad():
_, old_pis = self.old_net(mb_obs)
# get the old log probs
old_log_prob, _ = evaluate_actions(old_pis, mb_actions, self.args.dist, self.env_type)
old_log_prob = old_log_prob.detach()
# evaluate the current policy
log_prob, ent_loss = evaluate_actions(pis, mb_actions, self.args.dist, self.env_type)
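                # PPO clipped surrogate objective, built from the two surrogates below:
                #   L_clip = -E[min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t)]
                # where r_t = pi_theta(a|s) / pi_theta_old(a|s) is the probability ratio.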
prob_ratio = torch.exp(log_prob - old_log_prob)
# surr1
surr1 = prob_ratio * mb_advs
surr2 = torch.clamp(prob_ratio, 1 - self.args.clip, 1 + self.args.clip) * mb_advs
policy_loss = -torch.min(surr1, surr2).mean()
# final total loss
total_loss = policy_loss + self.args.vloss_coef * value_loss - ent_loss * self.args.ent_coef
# clear the grad buffer
self.optimizer.zero_grad()
total_loss.backward()
torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.max_grad_norm)
# update
self.optimizer.step()
return policy_loss.item(), value_loss.item(), ent_loss.item()
# convert the numpy array to tensors
def _get_tensors(self, obs):
if self.env_type == 'atari':
obs_tensor = torch.tensor(np.transpose(obs, (0, 3, 1, 2)), dtype=torch.float32)
else:
obs_tensor = torch.tensor(obs, dtype=torch.float32)
# decide if put the tensor on the GPU
if self.args.cuda:
obs_tensor = obs_tensor.cuda()
return obs_tensor
# adjust the learning rate
def _adjust_learning_rate(self, update, num_updates):
lr_frac = 1 - (update / num_updates)
adjust_lr = self.args.lr * lr_frac
for param_group in self.optimizer.param_groups:
param_group['lr'] = adjust_lr
| 48.307692 | 144 | 0.570152 |
4a1fda74c03e3193b2cba3e5d7aef1e654406dff | 4,622 | py | Python | ait/core/server/stream.py | robschneider16/AIT-Core | 628e7f60041c4929b82e55dad1a576fc1e8e5e9f | [
"MIT"
] | 1 | 2019-07-29T20:18:34.000Z | 2019-07-29T20:18:34.000Z | ait/core/server/stream.py | seanlu99/AIT-Core | d746079bcff574d930f633bee59337eabf54e99c | [
"MIT"
] | null | null | null | ait/core/server/stream.py | seanlu99/AIT-Core | d746079bcff574d930f633bee59337eabf54e99c | [
"MIT"
] | null | null | null | import ait.core.log
from client import ZMQInputClient, PortInputClient, PortOutputClient
class Stream(object):
"""
This is the base Stream class that all streams will inherit from.
It calls its handlers to execute on all input messages sequentially,
and validates the handler workflow if handler input and output
types were specified.
"""
def __init__(self, name, inputs, handlers, zmq_args={}, **kwargs):
"""
Params:
name: string name of stream (should be unique)
inputs: list of inputs to stream.
if input is int, port number to receive messages on
if input is string, stream or plugin name to receive
messages from
handlers: list of handlers (empty list if no handlers for stream)
zmq_args: (optional) dict containing the follow keys:
zmq_context
zmq_proxy_xsub_url
zmq_proxy_xpub_url
Defaults to empty dict here. Default values
assigned during instantiation of parent class.
**kwargs: (optional) Depends on requirements of child class
Raises:
ValueError: if workflow is not found to be valid based on handlers'
provided input and output types
"""
self.name = name
self.inputs = inputs if inputs is not None else [ ]
self.handlers = handlers
if not self.valid_workflow():
raise ValueError('Sequential workflow inputs and outputs ' +
'are not compatible. Workflow is invalid.')
# This calls __init__ on subclass of ZMQClient
if 'output' in kwargs:
super(Stream, self).__init__(input=self.inputs,
output=kwargs['output'],
**zmq_args)
else:
super(Stream, self).__init__(input=self.inputs,
**zmq_args)
def __repr__(self):
return '<{} name={}>'.format(
str(type(self)).split('.')[-1].split('\'')[0],
self.name)
def process(self, input_data, topic=None):
"""
Invokes each handler in sequence.
Publishes final output data.
Terminates all handler calls and does not publish data if None is received from a single handler.
Params:
input_data: message received by stream
topic: name of plugin or stream message received from,
if applicable
"""
for handler in self.handlers:
output = handler.handle(input_data)
if output:
input_data = output
else:
msg = type(handler).__name__ + " returned no data and caused the handling process to end."
ait.core.log.info(msg)
return
self.publish(input_data)
def valid_workflow(self):
"""
Return true if each handler's output type is the same as
the next handler's input type. Return False if not.
Returns: boolean - True if workflow is valid, False if not
"""
for ix, handler in enumerate(self.handlers[:-1]):
next_input_type = self.handlers[ix + 1].input_type
if (handler.output_type is not None and
next_input_type is not None):
if handler.output_type != next_input_type:
return False
return True
class PortInputStream(Stream, PortInputClient):
"""
This stream type listens for messages from a UDP port and publishes to a ZMQ socket.
"""
def __init__(self, name, inputs, handlers, zmq_args={}):
super(PortInputStream, self).__init__(name, inputs, handlers, zmq_args)
class ZMQStream(Stream, ZMQInputClient):
"""
This stream type listens for messages from another stream or plugin and publishes
to a ZMQ socket.
"""
def __init__(self, name, inputs, handlers, zmq_args={}):
super(ZMQStream, self).__init__(name, inputs, handlers, zmq_args)
class PortOutputStream(Stream, PortOutputClient):
"""
This stream type listens for messages from another stream or plugin and
publishes to a UDP port.
"""
def __init__(self, name, inputs, output, handlers, zmq_args={}):
super(PortOutputStream, self).__init__(name, inputs, handlers, zmq_args, output=output)
| 37.577236 | 106 | 0.581783 |
4a1fdb331c580272aad28db8a7894ac35a049dc5 | 3,191 | py | Python | deeppavlov/models/morpho_tagger/common.py | khakhulin/DeepPavlov | 5f631cc887aa935f2e67b7c65a19c7e777cf7db7 | [
"Apache-2.0"
] | null | null | null | deeppavlov/models/morpho_tagger/common.py | khakhulin/DeepPavlov | 5f631cc887aa935f2e67b7c65a19c7e777cf7db7 | [
"Apache-2.0"
] | null | null | null | deeppavlov/models/morpho_tagger/common.py | khakhulin/DeepPavlov | 5f631cc887aa935f2e67b7c65a19c7e777cf7db7 | [
"Apache-2.0"
] | null | null | null | import re
from pathlib import Path
from deeppavlov.core.commands.infer import build_model_from_config
from deeppavlov.core.commands.utils import set_deeppavlov_root, expand_path
from deeppavlov.core.common.file import read_json
from deeppavlov.core.common.params import from_params
from deeppavlov.core.common.registry import model as get_model
from deeppavlov.core.common.registry import register
from deeppavlov.core.models.component import Component
from deeppavlov.dataset_iterators.morphotagger_iterator import MorphoTaggerDatasetIterator
from deeppavlov.models.morpho_tagger.common_tagger import make_pos_and_tag
def predict_with_model(config_path):
config = read_json(config_path)
set_deeppavlov_root(config)
reader_config = config['dataset_reader']
reader = get_model(reader_config['name'])()
data_path = expand_path(reader_config.get('data_path', ''))
read_params = {k: v for k, v in reader_config.items() if k not in ['name', 'data_path']}
data = reader.read(data_path, **read_params)
iterator_config = config['dataset_iterator']
iterator: MorphoTaggerDatasetIterator =\
from_params(iterator_config, data=data)
model = build_model_from_config(config, load_trained=True)
answers = [None] * len(iterator.test)
batch_size = config['predict'].get("batch_size", -1)
for indexes, (x, _) in iterator.gen_batches(
batch_size=batch_size, data_type="test", shuffle=False, return_indexes=True):
y = model(x)
for i, elem in zip(indexes, y):
answers[i] = elem
outfile = config['predict'].get("outfile")
if outfile is not None:
outfile = Path(outfile)
if not outfile.exists():
outfile.parent.mkdir(parents=True, exist_ok=True)
with open(outfile, "w", encoding="utf8") as fout:
for elem in answers:
fout.write(elem + "\n")
return answers
def prettify(sent, tags, return_string=True, begin="", end="", sep="\n"):
"""
x: str, sentence
y: list, a sequence of tags
x = "John likes, really likes pizza"
y = ["NNP", "VBZ", "PUNCT", "RB", "VBZ", "NN"]
answer:
1 John NNP
2 likes VBZ
3 , PUNCT
4 really RB
5 likes VBZ
6 pizza NN
"""
if isinstance(sent, str):
        words = [x for x in re.split(r"(\w+|[,.])", sent) if x.strip() != ""]
else:
words = sent
answer = []
for i, (word, tag) in enumerate(zip(words, tags)):
answer.append("{}\t{}\t{}\t{}".format(i+1, word, *make_pos_and_tag(tag)))
if return_string:
answer = begin + sep.join(answer) + end
return answer
@register('tag_output_prettifier')
class TagOutputPrettifier(Component):
def __init__(self, return_string=True, begin="", end="", sep="\n", *args, **kwargs):
self.return_string = return_string
self.begin = begin
self.end = end
self.sep = sep
def __call__(self, X, Y):
return [prettify(x, y, return_string=self.return_string,
begin=self.begin, end=self.end, sep=self.sep)
for x, y in zip(X, Y)]
| 34.311828 | 92 | 0.648073 |
4a1fdc9493cfd33f29d0593636d5d83b9585d5a1 | 1,558 | py | Python | pubgate/utils/cached.py | autogestion/pubgate | e9a1850a60bb34aac59542b97f730c08dc8d46fa | [
"BSD-3-Clause"
] | 79 | 2018-06-23T23:20:20.000Z | 2022-03-22T18:33:37.000Z | pubgate/utils/cached.py | autogestion/pubgate | e9a1850a60bb34aac59542b97f730c08dc8d46fa | [
"BSD-3-Clause"
] | 25 | 2018-11-03T10:14:06.000Z | 2021-01-20T22:33:14.000Z | pubgate/utils/cached.py | autogestion/pubgate | e9a1850a60bb34aac59542b97f730c08dc8d46fa | [
"BSD-3-Clause"
] | 5 | 2018-11-03T10:11:36.000Z | 2020-03-19T14:34:05.000Z | from pubgate import BaseUrl
from pubgate.db import Inbox, User, Outbox
from pubgate.utils.networking import fetch
def cached_mode(request):
if (request.args.get('cached')
and request.app.config.get('APPLY_CASHING')):
return True
async def ensure_inbox(object_id):
# TODO also fetch and cache reactions (replies, likes, shares)
exists = await Inbox.get_by_uri(object_id)
if not exists:
cached_user = await User.find_one({'name': 'cached'})
activity_object = await fetch(object_id)
await Inbox.save(cached_user, {
'type': 'Create',
'id': f'{object_id}#activity',
'published': activity_object['published'],
'object': activity_object
})
async def trace_replies(target):
cls = Outbox if target.startswith(BaseUrl.value) else Inbox
target_object = await cls.get_by_uri(target)
is_reply = target_object.activity['object'].get('inReplyTo')
if is_reply:
await cls.cache.delete(is_reply)
await trace_replies(is_reply)
async def handle_cache(activity, cls):
target = None
print(activity)
if activity['type'] in ['Announce', 'Like']:
target = activity['object']
elif activity['type'] == 'Create' and activity['object'].get('inReplyTo'):
target = activity['object']['inReplyTo']
if target is None:
return
local = target.startswith(BaseUrl.value)
if not local:
await ensure_inbox(target)
await cls.cache.delete(target)
await trace_replies(target)
| 31.16 | 78 | 0.658537 |
4a1fdc96f0a826d83b1f5b838320fe1bfec7b5f1 | 1,879 | py | Python | limacharlie/__init__.py | macdaliot/python-limacharlie | dc8011418f09d3acc0ebce5ea3afb98e4b7f600f | [
"Apache-2.0"
] | null | null | null | limacharlie/__init__.py | macdaliot/python-limacharlie | dc8011418f09d3acc0ebce5ea3afb98e4b7f600f | [
"Apache-2.0"
] | null | null | null | limacharlie/__init__.py | macdaliot/python-limacharlie | dc8011418f09d3acc0ebce5ea3afb98e4b7f600f | [
"Apache-2.0"
] | null | null | null | """limacharlie API for limacharlie.io"""
__version__ = "2.4.0"
__author__ = "Maxime Lamothe-Brassard ( Refraction Point, Inc )"
__author_email__ = "[email protected]"
__license__ = "Apache v2"
__copyright__ = "Copyright (c) 2018 Refraction Point, Inc"
# Global API Credentials
import os
import yaml
# Global credentials are acquired in the following order:
# 1- LC_OID and LC_API_KEY environment variables.
# 2- LC_CREDS_FILE environment variable points to a YAML file with "oid: <OID>" and "api_key: <KEY>".
# 3- Assumes a creds file (like #2) is present at "~/.limacharlie".
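# Example creds file layout implied by the parsing below (all values are
# placeholders):
#
#   oid: 11111111-1111-1111-1111-111111111111
#   api_key: aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
#   env:
#     staging:
#       oid: 22222222-2222-2222-2222-222222222222
#       api_key: bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb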
GLOBAL_OID = os.environ.get( 'LC_OID', None )
GLOBAL_API_KEY = os.environ.get( 'LC_API_KEY', None )
if GLOBAL_API_KEY is None:
_credsFile = os.environ.get( 'LC_CREDS_FILE', None )
if _credsFile is None:
_credsFile = os.path.expanduser( '~/.limacharlie' )
if os.path.isfile( _credsFile ):
with open( _credsFile, 'rb' ) as f:
        _credsFile = yaml.safe_load( f.read() )
_lcEnv = os.environ.get( 'LC_CURRENT_ENV', 'default' )
if _lcEnv == '':
_lcEnv = 'default'
if _lcEnv == 'default':
GLOBAL_OID = _credsFile[ 'oid' ]
GLOBAL_API_KEY = _credsFile[ 'api_key' ]
else:
if _credsFile.get( 'env', {} ).get( _lcEnv, None ) is None:
raise Exception( "LimaCharlie environment specified in LC_CURRENT_ENV could not be found in local config file: ~/.limacharlie" )
GLOBAL_OID = _credsFile[ 'env' ][ _lcEnv ][ 'oid' ]
GLOBAL_API_KEY = _credsFile[ 'env' ][ _lcEnv ][ 'api_key' ]
from .Manager import Manager
from .Firehose import Firehose
from .Spout import Spout
from .Hunter import Hunter
from .Webhook import Webhook
from .Sync import Sync
from .SpotCheck import SpotCheck
from .utils import LcApiException | 41.755556 | 148 | 0.657265 |
4a1fdcac9a0c46350cd16ea5b93621177365317d | 1,085 | py | Python | update_scripts/regression.py | Gaurang484/COVID-19-DataVisualization | 88d4b68fce589d004f3e01602b30cfc46ac97753 | [
"MIT"
] | null | null | null | update_scripts/regression.py | Gaurang484/COVID-19-DataVisualization | 88d4b68fce589d004f3e01602b30cfc46ac97753 | [
"MIT"
] | null | null | null | update_scripts/regression.py | Gaurang484/COVID-19-DataVisualization | 88d4b68fce589d004f3e01602b30cfc46ac97753 | [
"MIT"
] | null | null | null | import numpy as maths
def lin_reg(*series):
	'''Fits the data (x, y) to the equation y = ax + b and returns (a, b)'''
	if len(series) == 1:
		y = maths.asarray(series[0], dtype=float)
		x = maths.arange(len(y), dtype=float)
	else:
		#Convert both series to arrays so the vectorised arithmetic below works.
		x = maths.asarray(series[0], dtype=float)
		y = maths.asarray(series[1], dtype=float)
	x_ = maths.sum(x) / len(x)
	y_ = maths.sum(y) / len(y)
	xy_ = maths.dot(x - x_, y - y_)
	xx_ = maths.dot(x - x_, x - x_)
	a = xy_ / xx_
	b = y_ - (a * x_)
	return(a, b)
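#Worked sketch (illustrative numbers): for y sampled from roughly 2x + 1,
#lin_reg returns a close to 2 and b close to 1:
#
#   a, b = lin_reg([1.0, 3.1, 4.9, 7.2, 9.0])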
def exp_reg(y):
	'''Fits the data (y) to the equation y = b.(a^x) and returns (a, b)'''
	y = [y_ for y_ in y if y_ > 0] #Keep only positive values so the log-transform below is defined.
	if(len(y) == 0): #If all entries in y are 0, then the required curve is the x-axis.
		return(0, 0)
	if(len(y) == 1): #If there's only one entry in y, then the required curve is a line parallel to the x-axis: y = y[0]
		return(1, y[0]) #a = 1 and b = y[0] make b.(a^x) the constant y[0].
	else:
		a, b = maths.exp(lin_reg(maths.log(y))) #Fit the series to a linear model using a logarithmic scale.
		#y = b.(a^x) => log(y) = x.log(a) + log(b)
		return(a, b)
def exp_predict(x, a, b):
'''Returns a prediction for an exponenial series characterised by (a, b).'''
return((a ** x) * b)
| 29.324324 | 118 | 0.590783 |
4a1fdf14f0be3c8a41dfed3ca95901c4b395fea7 | 6,254 | py | Python | test/functional/rpc_spentindex.py | valuero-org/valuero | c0a8d40d377c39792e5a79d4a67f00bc592aef87 | [
"MIT"
] | null | null | null | test/functional/rpc_spentindex.py | valuero-org/valuero | c0a8d40d377c39792e5a79d4a67f00bc592aef87 | [
"MIT"
] | null | null | null | test/functional/rpc_spentindex.py | valuero-org/valuero | c0a8d40d377c39792e5a79d4a67f00bc592aef87 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Copyright (c) 2018-2019 The Rito Core developers
# Copyright (c) 2019 The Valuero developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test RPC addressindex generation and fetching
#
import time
from test_framework.test_framework import ValueroTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class SpentIndexTest(ValueroTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self):
self.add_nodes(4, [
# Nodes 0/1 are "wallet" nodes
["-debug"],
["-debug", "-spentindex"],
# Nodes 2/3 are used for testing
["-debug", "-spentindex"],
["-debug", "-spentindex", "-txindex"]])
self.start_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
self.sync_all()
def run_test(self):
print("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
# Check that
print("Testing spent index...")
feeSatoshis = 10000
privkey = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
address = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
addressHash = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
scriptPubKey = CScript([OP_DUP, OP_HASH160, addressHash, OP_EQUALVERIFY, OP_CHECKSIG])
unspent = self.nodes[0].listunspent()
tx = CTransaction()
amount = int(unspent[0]["amount"] * 100000000 - feeSatoshis)
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
tx.vout = [CTxOut(amount, scriptPubKey)]
tx.rehash()
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.nodes[0].generate(1)
self.sync_all()
print("Testing getspentinfo method...")
# Check that the spentinfo works standalone
info = self.nodes[1].getspentinfo({"txid": unspent[0]["txid"], "index": unspent[0]["vout"]})
assert_equal(info["txid"], txid)
assert_equal(info["index"], 0)
assert_equal(info["height"], 106)
print("Testing getrawtransaction method...")
# Check that verbose raw transaction includes spent info
txVerbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentTxId"], txid)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentIndex"], 0)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentHeight"], 106)
# Check that verbose raw transaction includes input values
txVerbose2 = self.nodes[3].getrawtransaction(txid, 1)
assert_equal(float(txVerbose2["vin"][0]["value"]), (amount + feeSatoshis) / 100000000)
assert_equal(txVerbose2["vin"][0]["valueSat"], amount + feeSatoshis)
# Check that verbose raw transaction includes address values and input values
privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
addressHash2 = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220])
scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(int(txid, 16), 0))]
amount = int(amount - feeSatoshis);
tx2.vout = [CTxOut(amount, scriptPubKey2)]
tx.rehash()
self.nodes[0].importprivkey(privkey)
signed_tx2 = self.nodes[0].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
txid2 = self.nodes[0].sendrawtransaction(signed_tx2["hex"], True)
# Check the mempool index
self.sync_all()
txVerbose3 = self.nodes[1].getrawtransaction(txid2, 1)
assert_equal(txVerbose3["vin"][0]["address"], address2)
assert_equal(txVerbose3["vin"][0]["valueSat"], amount + feeSatoshis)
assert_equal(float(txVerbose3["vin"][0]["value"]), (amount + feeSatoshis) / 100000000)
# Check the database index
block_hash = self.nodes[0].generate(1)
self.sync_all()
txVerbose4 = self.nodes[3].getrawtransaction(txid2, 1)
assert_equal(txVerbose4["vin"][0]["address"], address2)
assert_equal(txVerbose4["vin"][0]["valueSat"], amount + feeSatoshis)
assert_equal(float(txVerbose4["vin"][0]["value"]), (amount + feeSatoshis) / 100000000)
# Check block deltas
print("Testing getblockdeltas...")
block = self.nodes[3].getblockdeltas(block_hash[0])
assert_equal(len(block["deltas"]), 2)
assert_equal(block["deltas"][0]["index"], 0)
assert_equal(len(block["deltas"][0]["inputs"]), 0)
assert_equal(len(block["deltas"][0]["outputs"]), 0)
assert_equal(block["deltas"][1]["index"], 1)
assert_equal(block["deltas"][1]["txid"], txid2)
assert_equal(block["deltas"][1]["inputs"][0]["index"], 0)
assert_equal(block["deltas"][1]["inputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
assert_equal(block["deltas"][1]["inputs"][0]["satoshis"], (amount + feeSatoshis) * -1)
assert_equal(block["deltas"][1]["inputs"][0]["prevtxid"], txid)
assert_equal(block["deltas"][1]["inputs"][0]["prevout"], 0)
assert_equal(block["deltas"][1]["outputs"][0]["index"], 0)
assert_equal(block["deltas"][1]["outputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW")
assert_equal(block["deltas"][1]["outputs"][0]["satoshis"], amount)
print("Passed\n")
if __name__ == '__main__':
SpentIndexTest().main()
| 42.544218 | 104 | 0.644867 |
4a1fdf8524c34406cd29ab3eddcaa86b41059938 | 474 | py | Python | ignition/script-python/pidbotscripts/utils/data_mapping/code.py | jlbcontrols/pidbot-manager | 42dbc4487120aa7ea4405aec023b79955683ba31 | [
"MIT"
] | 3 | 2020-10-17T02:47:34.000Z | 2021-10-31T08:14:39.000Z | ignition/script-python/pidbotscripts/utils/data_mapping/code.py | jlbcontrols/pidbot-manager | 42dbc4487120aa7ea4405aec023b79955683ba31 | [
"MIT"
] | 2 | 2020-11-11T19:13:51.000Z | 2020-11-11T19:14:54.000Z | ignition/script-python/pidbotscripts/utils/data_mapping/code.py | jlbcontrols/pidbot-manager | 42dbc4487120aa7ea4405aec023b79955683ba31 | [
"MIT"
] | null | null | null | def listToColumnDs(list,header="values"):
headers = [header]
data = [[item] for item in list]
return system.dataset.toDataSet(headers,data)
def dictToRowDs(dict):
row = []
headers = []
for key, value in dict.items():
headers.append(key)
row.append(value)
return system.dataset.toDataSet(headers,[row])
def rowDsToDict(dataset):
dict = {}
for col in range(0,dataset.getColumnCount()):
dict[dataset.getColumnName(col)] = dataset.getValueAt(0,col)
return dict | 26.333333 | 62 | 0.721519 |
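# Usage sketch (runs inside Ignition, where `system.dataset` is provided by
# the platform; the tag names are placeholders):
#
#   ds = dictToRowDs({"tag": "FIC101", "sp": 50.0})
#   roundTrip = rowDsToDict(ds)   # -> {"tag": "FIC101", "sp": 50.0}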
4a1fdf9357fe73f62e5e7f64d0f47ce820cbef3c | 5,700 | py | Python | src/schemathesis/specs/graphql/schemas.py | schemathesis/schemathesis | 5684c826fc16cefed956a30ec587b7271da86381 | [
"MIT"
] | 659 | 2020-09-03T13:27:50.000Z | 2022-03-31T17:07:16.000Z | src/schemathesis/specs/graphql/schemas.py | schemathesis/schemathesis | 5684c826fc16cefed956a30ec587b7271da86381 | [
"MIT"
] | 570 | 2020-09-03T15:57:43.000Z | 2022-03-31T17:13:52.000Z | src/schemathesis/specs/graphql/schemas.py | schemathesis/schemathesis | 5684c826fc16cefed956a30ec587b7271da86381 | [
"MIT"
] | 66 | 2020-09-05T07:09:03.000Z | 2022-03-17T08:17:55.000Z | from functools import partial
from typing import Any, Dict, Generator, List, Optional, Sequence, Tuple, Type, TypeVar, Union, cast
from urllib.parse import urlsplit
import attr
import graphql
import requests
from hypothesis import strategies as st
from hypothesis.strategies import SearchStrategy
from hypothesis_graphql import strategies as gql_st
from ...checks import not_a_server_error
from ...constants import DataGenerationMethod
from ...exceptions import InvalidSchema
from ...hooks import HookDispatcher
from ...models import APIOperation, Case, CheckFunction, OperationDefinition
from ...schemas import BaseSchema
from ...stateful import Stateful, StatefulTest
from ...types import Body, Cookies, Headers, NotSet, PathParameters, Query
from ...utils import NOT_SET, GenericResponse, Ok, Result
@attr.s(slots=True, repr=False) # pragma: no mutate
class GraphQLCase(Case):
def as_requests_kwargs(
self, base_url: Optional[str] = None, headers: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
final_headers = self._get_headers(headers)
base_url = self._get_base_url(base_url)
kwargs: Dict[str, Any] = {"method": self.method, "url": base_url, "headers": final_headers}
# There is no direct way to have bytes here, but it is a useful pattern to support.
# It also unifies GraphQLCase with its Open API counterpart where bytes may come from external examples
if isinstance(self.body, bytes):
kwargs["data"] = self.body
# Assume that the payload is JSON, not raw GraphQL queries
kwargs["headers"].setdefault("Content-Type", "application/json")
else:
kwargs["json"] = {"query": self.body}
return kwargs
def as_werkzeug_kwargs(self, headers: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
final_headers = self._get_headers(headers)
return {
"method": self.method,
"path": self.operation.schema.get_full_path(self.formatted_path),
"headers": final_headers,
"query_string": self.query,
"json": {"query": self.body},
}
def validate_response(
self,
response: GenericResponse,
checks: Tuple[CheckFunction, ...] = (),
additional_checks: Tuple[CheckFunction, ...] = (),
code_sample_style: Optional[str] = None,
) -> None:
checks = checks or (not_a_server_error,)
checks += additional_checks
return super().validate_response(response, checks, code_sample_style=code_sample_style)
def call_asgi(
self,
app: Any = None,
base_url: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> requests.Response:
return super().call_asgi(app=app, base_url=base_url, headers=headers, **kwargs)
C = TypeVar("C", bound=Case)
@attr.s() # pragma: no mutate
class GraphQLSchema(BaseSchema):
def get_full_path(self, path: str) -> str:
return self.base_path
@property # pragma: no mutate
def verbose_name(self) -> str:
return "GraphQL"
@property
def client_schema(self) -> graphql.GraphQLSchema:
return graphql.build_client_schema(self.raw_schema)
@property
def base_path(self) -> str:
if self.base_url:
return urlsplit(self.base_url).path
return self._get_base_path()
def _get_base_path(self) -> str:
return cast(str, urlsplit(self.location).path)
def get_all_operations(self) -> Generator[Result[APIOperation, InvalidSchema], None, None]:
schema = self.client_schema
if schema.query_type is None:
return
for field_name, definition in schema.query_type.fields.items():
yield Ok(
APIOperation(
base_url=self.get_base_url(),
path=self.base_path,
verbose_name=field_name,
method="POST",
app=self.app,
schema=self,
# Parameters are not yet supported
definition=OperationDefinition(raw=definition, resolved=definition, scope="", parameters=[]),
case_cls=GraphQLCase,
)
)
def get_case_strategy(
self,
operation: APIOperation,
hooks: Optional[HookDispatcher] = None,
data_generation_method: DataGenerationMethod = DataGenerationMethod.default(),
) -> SearchStrategy:
constructor = partial(GraphQLCase, operation=operation, data_generation_method=data_generation_method)
return st.builds(constructor, body=gql_st.query(self.client_schema, fields=[operation.verbose_name]))
def get_strategies_from_examples(self, operation: APIOperation) -> List[SearchStrategy[Case]]:
return []
def get_stateful_tests(
self, response: GenericResponse, operation: APIOperation, stateful: Optional[Stateful]
) -> Sequence[StatefulTest]:
return []
def make_case(
self,
*,
case_cls: Type[C],
operation: APIOperation,
path_parameters: Optional[PathParameters] = None,
headers: Optional[Headers] = None,
cookies: Optional[Cookies] = None,
query: Optional[Query] = None,
body: Union[Body, NotSet] = NOT_SET,
media_type: Optional[str] = None,
) -> C:
return case_cls(
operation=operation,
path_parameters=path_parameters,
headers=headers,
cookies=cookies,
query=query,
body=body,
media_type=media_type,
)
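# Usage sketch (hedged: the URL is a placeholder and `from_url` lives in the
# public `schemathesis.graphql` loader module, not in this file):
#
#   schema = schemathesis.graphql.from_url("http://127.0.0.1:8000/graphql")
#   operations = list(schema.get_all_operations())  # yields Ok-wrapped APIOperation objects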
| 37.012987 | 113 | 0.640175 |
4a1fe0dccad6101273e473887ee5da60e271907f | 12,571 | py | Python | lxmls/readers/galton.py | SimonSuster/lxmls-toolkit | 6a57884f8b7c98da816a60eb88593e0a1585d434 | [
"MIT"
] | 1 | 2015-09-20T05:16:38.000Z | 2015-09-20T05:16:38.000Z | lxmls/readers/galton.py | daviddao/LxMLS-labs-solution | 78413c1ee61752ca33988c454e3b2c27326e7063 | [
"MIT"
] | null | null | null | lxmls/readers/galton.py | daviddao/LxMLS-labs-solution | 78413c1ee61752ca33988c454e3b2c27326e7063 | [
"MIT"
] | null | null | null | import numpy as np
_data = np.array([
[70.5,61.7], [68.5,61.7], [65.5,61.7], [64.5,61.7], [64,61.7], [67.5,62.2], [67.5,62.2], [67.5,62.2],
[66.5,62.2], [66.5,62.2], [66.5,62.2], [64.5,62.2], [70.5,63.2], [69.5,63.2], [68.5,63.2], [68.5,63.2],
[68.5,63.2], [68.5,63.2], [68.5,63.2], [68.5,63.2], [68.5,63.2], [67.5,63.2], [67.5,63.2], [67.5,63.2],
[67.5,63.2], [67.5,63.2], [66.5,63.2], [66.5,63.2], [66.5,63.2], [65.5,63.2], [65.5,63.2], [65.5,63.2],
[65.5,63.2], [65.5,63.2], [65.5,63.2], [65.5,63.2], [65.5,63.2], [65.5,63.2], [64.5,63.2], [64.5,63.2],
[64.5,63.2], [64.5,63.2], [64,63.2], [64,63.2], [69.5,64.2], [69.5,64.2], [69.5,64.2], [69.5,64.2],
[69.5,64.2], [69.5,64.2], [69.5,64.2], [69.5,64.2], [69.5,64.2], [69.5,64.2], [69.5,64.2], [69.5,64.2],
[69.5,64.2], [69.5,64.2], [69.5,64.2], [69.5,64.2], [68.5,64.2], [68.5,64.2], [68.5,64.2], [68.5,64.2],
[68.5,64.2], [68.5,64.2], [68.5,64.2], [68.5,64.2], [68.5,64.2], [68.5,64.2], [68.5,64.2], [67.5,64.2],
[67.5,64.2], [67.5,64.2], [67.5,64.2], [67.5,64.2], [67.5,64.2], [67.5,64.2], [67.5,64.2], [67.5,64.2],
[67.5,64.2], [67.5,64.2], [67.5,64.2], [67.5,64.2], [67.5,64.2], [66.5,64.2], [66.5,64.2], [66.5,64.2],
[66.5,64.2], [66.5,64.2], [65.5,64.2], [65.5,64.2], [65.5,64.2], [65.5,64.2], [65.5,64.2], [64.5,64.2],
[64.5,64.2], [64.5,64.2], [64.5,64.2], [64,64.2], [64,64.2], [64,64.2], [64,64.2], [71.5,65.2],
[70.5,65.2], [69.5,65.2], [69.5,65.2], [69.5,65.2], [69.5,65.2], [68.5,65.2], [68.5,65.2], [68.5,65.2],
[68.5,65.2], [68.5,65.2], [68.5,65.2], [68.5,65.2], [68.5,65.2], [68.5,65.2], [68.5,65.2], [68.5,65.2],
[68.5,65.2], [68.5,65.2], [68.5,65.2], [68.5,65.2], [68.5,65.2], [67.5,65.2], [67.5,65.2], [67.5,65.2],
[67.5,65.2], [67.5,65.2], [67.5,65.2], [67.5,65.2], [67.5,65.2], [67.5,65.2], [67.5,65.2], [67.5,65.2],
[67.5,65.2], [67.5,65.2], [67.5,65.2], [67.5,65.2], [66.5,65.2], [66.5,65.2], [65.5,65.2], [65.5,65.2],
[65.5,65.2], [65.5,65.2], [65.5,65.2], [65.5,65.2], [65.5,65.2], [64.5,65.2], [64,65.2], [71.5,66.2],
[71.5,66.2], [71.5,66.2], [70.5,66.2], [69.5,66.2], [69.5,66.2], [69.5,66.2], [69.5,66.2], [69.5,66.2],
[69.5,66.2], [69.5,66.2], [69.5,66.2], [69.5,66.2], [69.5,66.2], [69.5,66.2], [69.5,66.2], [69.5,66.2],
[69.5,66.2], [69.5,66.2], [69.5,66.2], [69.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2],
[68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2],
[68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2],
[68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2], [68.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2],
[67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2],
[67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2],
[67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2],
[67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2], [67.5,66.2],
[67.5,66.2], [66.5,66.2], [66.5,66.2], [66.5,66.2], [66.5,66.2], [66.5,66.2], [66.5,66.2], [66.5,66.2],
[66.5,66.2], [66.5,66.2], [66.5,66.2], [66.5,66.2], [66.5,66.2], [66.5,66.2], [66.5,66.2], [66.5,66.2],
[66.5,66.2], [66.5,66.2], [65.5,66.2], [65.5,66.2], [65.5,66.2], [65.5,66.2], [65.5,66.2], [65.5,66.2],
[65.5,66.2], [65.5,66.2], [65.5,66.2], [65.5,66.2], [65.5,66.2], [64.5,66.2], [64.5,66.2], [64.5,66.2],
[64.5,66.2], [64.5,66.2], [64,66.2], [64,66.2], [71.5,67.2], [71.5,67.2], [71.5,67.2], [71.5,67.2],
[70.5,67.2], [70.5,67.2], [70.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2],
[69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2],
[69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2],
[69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [69.5,67.2], [68.5,67.2], [68.5,67.2],
[68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2],
[68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2],
[68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2],
[68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [68.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2],
[67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2],
[67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2],
[67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2],
[67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2], [67.5,67.2],
[67.5,67.2], [67.5,67.2], [67.5,67.2], [66.5,67.2], [66.5,67.2], [66.5,67.2], [66.5,67.2], [66.5,67.2],
[66.5,67.2], [66.5,67.2], [66.5,67.2], [66.5,67.2], [66.5,67.2], [66.5,67.2], [66.5,67.2], [66.5,67.2],
[66.5,67.2], [66.5,67.2], [66.5,67.2], [66.5,67.2], [65.5,67.2], [65.5,67.2], [65.5,67.2], [65.5,67.2],
[65.5,67.2], [65.5,67.2], [65.5,67.2], [65.5,67.2], [65.5,67.2], [65.5,67.2], [65.5,67.2], [64.5,67.2],
[64.5,67.2], [64.5,67.2], [64.5,67.2], [64.5,67.2], [64,67.2], [64,67.2], [72.5,68.2], [71.5,68.2],
[71.5,68.2], [71.5,68.2], [70.5,68.2], [70.5,68.2], [70.5,68.2], [70.5,68.2], [70.5,68.2], [70.5,68.2],
[70.5,68.2], [70.5,68.2], [70.5,68.2], [70.5,68.2], [70.5,68.2], [70.5,68.2], [69.5,68.2], [69.5,68.2],
[69.5,68.2], [69.5,68.2], [69.5,68.2], [69.5,68.2], [69.5,68.2], [69.5,68.2], [69.5,68.2], [69.5,68.2],
[69.5,68.2], [69.5,68.2], [69.5,68.2], [69.5,68.2], [69.5,68.2], [69.5,68.2], [69.5,68.2], [69.5,68.2],
[69.5,68.2], [69.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2],
[68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2],
[68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2],
[68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2],
[68.5,68.2], [68.5,68.2], [68.5,68.2], [68.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2],
[67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2],
[67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2],
[67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2], [67.5,68.2],
[66.5,68.2], [66.5,68.2], [66.5,68.2], [66.5,68.2], [66.5,68.2], [66.5,68.2], [66.5,68.2], [66.5,68.2],
[66.5,68.2], [66.5,68.2], [66.5,68.2], [66.5,68.2], [66.5,68.2], [66.5,68.2], [65.5,68.2], [65.5,68.2],
[65.5,68.2], [65.5,68.2], [65.5,68.2], [65.5,68.2], [65.5,68.2], [64,68.2], [72.5,69.2], [72.5,69.2],
[71.5,69.2], [71.5,69.2], [71.5,69.2], [71.5,69.2], [71.5,69.2], [70.5,69.2], [70.5,69.2], [70.5,69.2],
[70.5,69.2], [70.5,69.2], [70.5,69.2], [70.5,69.2], [70.5,69.2], [70.5,69.2], [70.5,69.2], [70.5,69.2],
[70.5,69.2], [70.5,69.2], [70.5,69.2], [70.5,69.2], [70.5,69.2], [70.5,69.2], [70.5,69.2], [69.5,69.2],
[69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2],
[69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2],
[69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2],
[69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2], [69.5,69.2],
[68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2],
[68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2],
[68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2],
[68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2],
[68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2],
[68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2], [68.5,69.2],
[67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2],
[67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2],
[67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2],
[67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2],
[67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [67.5,69.2], [66.5,69.2], [66.5,69.2],
[66.5,69.2], [66.5,69.2], [66.5,69.2], [66.5,69.2], [66.5,69.2], [66.5,69.2], [66.5,69.2], [66.5,69.2],
[66.5,69.2], [66.5,69.2], [66.5,69.2], [65.5,69.2], [65.5,69.2], [65.5,69.2], [65.5,69.2], [65.5,69.2],
[65.5,69.2], [65.5,69.2], [64.5,69.2], [64.5,69.2], [64,69.2], [72.5,70.2], [71.5,70.2], [71.5,70.2],
[71.5,70.2], [71.5,70.2], [71.5,70.2], [71.5,70.2], [71.5,70.2], [71.5,70.2], [71.5,70.2], [71.5,70.2],
[70.5,70.2], [70.5,70.2], [70.5,70.2], [70.5,70.2], [70.5,70.2], [70.5,70.2], [70.5,70.2], [70.5,70.2],
[70.5,70.2], [70.5,70.2], [70.5,70.2], [70.5,70.2], [70.5,70.2], [70.5,70.2], [69.5,70.2], [69.5,70.2],
[69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2],
[69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2],
[69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [69.5,70.2], [68.5,70.2],
[68.5,70.2], [68.5,70.2], [68.5,70.2], [68.5,70.2], [68.5,70.2], [68.5,70.2], [68.5,70.2], [68.5,70.2],
[68.5,70.2], [68.5,70.2], [68.5,70.2], [68.5,70.2], [68.5,70.2], [68.5,70.2], [68.5,70.2], [68.5,70.2],
[68.5,70.2], [68.5,70.2], [68.5,70.2], [68.5,70.2], [67.5,70.2], [67.5,70.2], [67.5,70.2], [67.5,70.2],
[67.5,70.2], [67.5,70.2], [67.5,70.2], [67.5,70.2], [67.5,70.2], [67.5,70.2], [67.5,70.2], [67.5,70.2],
[67.5,70.2], [67.5,70.2], [67.5,70.2], [67.5,70.2], [67.5,70.2], [67.5,70.2], [67.5,70.2], [66.5,70.2],
[66.5,70.2], [66.5,70.2], [66.5,70.2], [65.5,70.2], [65.5,70.2], [65.5,70.2], [65.5,70.2], [65.5,70.2],
[72.5,71.2], [72.5,71.2], [71.5,71.2], [71.5,71.2], [71.5,71.2], [71.5,71.2], [70.5,71.2], [70.5,71.2],
[70.5,71.2], [70.5,71.2], [70.5,71.2], [70.5,71.2], [70.5,71.2], [69.5,71.2], [69.5,71.2], [69.5,71.2],
[69.5,71.2], [69.5,71.2], [69.5,71.2], [69.5,71.2], [69.5,71.2], [69.5,71.2], [69.5,71.2], [69.5,71.2],
[69.5,71.2], [69.5,71.2], [69.5,71.2], [69.5,71.2], [69.5,71.2], [69.5,71.2], [69.5,71.2], [69.5,71.2],
[69.5,71.2], [68.5,71.2], [68.5,71.2], [68.5,71.2], [68.5,71.2], [68.5,71.2], [68.5,71.2], [68.5,71.2],
[68.5,71.2], [68.5,71.2], [68.5,71.2], [68.5,71.2], [68.5,71.2], [68.5,71.2], [68.5,71.2], [68.5,71.2],
[68.5,71.2], [68.5,71.2], [68.5,71.2], [67.5,71.2], [67.5,71.2], [67.5,71.2], [67.5,71.2], [67.5,71.2],
[67.5,71.2], [67.5,71.2], [67.5,71.2], [67.5,71.2], [67.5,71.2], [67.5,71.2], [65.5,71.2], [65.5,71.2],
[73,72.2], [72.5,72.2], [72.5,72.2], [72.5,72.2], [72.5,72.2], [72.5,72.2], [72.5,72.2], [72.5,72.2],
[71.5,72.2], [71.5,72.2], [71.5,72.2], [71.5,72.2], [71.5,72.2], [71.5,72.2], [71.5,72.2], [71.5,72.2],
[71.5,72.2], [70.5,72.2], [70.5,72.2], [70.5,72.2], [70.5,72.2], [69.5,72.2], [69.5,72.2], [69.5,72.2],
[69.5,72.2], [69.5,72.2], [69.5,72.2], [69.5,72.2], [69.5,72.2], [69.5,72.2], [69.5,72.2], [69.5,72.2],
[68.5,72.2], [68.5,72.2], [68.5,72.2], [68.5,72.2], [67.5,72.2], [67.5,72.2], [67.5,72.2], [67.5,72.2],
[65.5,72.2], [73,73.2], [73,73.2], [73,73.2], [72.5,73.2], [72.5,73.2], [71.5,73.2], [71.5,73.2],
[70.5,73.2], [70.5,73.2], [70.5,73.2], [69.5,73.2], [69.5,73.2], [69.5,73.2], [69.5,73.2], [68.5,73.2],
[68.5,73.2], [68.5,73.2], [72.5,73.7], [72.5,73.7], [72.5,73.7], [72.5,73.7], [71.5,73.7], [71.5,73.7],
[70.5,73.7], [70.5,73.7], [70.5,73.7], [69.5,73.7], [69.5,73.7], [69.5,73.7], [69.5,73.7], [69.5,73.7],
])
def load():
return _data.copy()
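# Usage sketch (illustrative, not part of the original reader). Each row pairs
# two height measurements from Galton's study; the file itself doesn't label
# the columns, so the x/y assignment below is an assumption:
#
#   data = load()                            # (N, 2) float array, safe to mutate
#   x, y = data[:, 0], data[:, 1]
#   slope, intercept = np.polyfit(x, y, 1)   # least-squares regression line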
| 102.203252 | 107 | 0.44539 |
4a1fe1400573df712ec090123aeeee923bb4cc82 | 6,212 | py | Python | thonny/plugins/misc_analyzers.py | webduino-cn/thonny | 74da2278aa018eafec697c2b92e2355237669ecd | [
"MIT"
] | 1 | 2021-06-12T22:24:40.000Z | 2021-06-12T22:24:40.000Z | Thonny/Lib/site-packages/thonny/plugins/misc_analyzers.py | Pydiderot/pydiderotIDE | a42fcde3ea837ae40c957469f5d87427e8ce46d3 | [
"MIT"
] | 30 | 2019-01-04T10:14:56.000Z | 2020-10-12T14:00:31.000Z | Thonny/Lib/site-packages/thonny/plugins/misc_analyzers.py | Pydiderot/pydiderotIDE | a42fcde3ea837ae40c957469f5d87427e8ce46d3 | [
"MIT"
] | 3 | 2018-11-24T14:00:30.000Z | 2019-07-02T02:32:26.000Z | from thonny.assistance import ProgramAnalyzer, add_program_analyzer
from thonny import get_runner, rst_utils
from thonny.running import CPythonProxy
import logging
import os
from thonny.common import is_same_path
known_stdlib_modules = {
# Compiled from https://docs.python.org/3.7/py-modindex.html
"__future__",
"__main__",
"_dummy_thread",
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"contextvars",
"copy",
"copyreg",
"cProfile",
"crypt",
"csv",
"ctypes",
"curses",
"dataclasses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"dummy_threading",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"macpath",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
}
class ProgramNamingAnalyzer(ProgramAnalyzer):
def start_analysis(self, main_file_path, imported_file_paths):
self.completion_handler(self, list(self._get_warnings(main_file_path)))
def _get_warnings(self, main_file_path):
# TODO: current dir may be different
main_file_dir = os.path.dirname(main_file_path)
if not os.path.isdir(main_file_dir):
return []
library_modules = known_stdlib_modules | self._get_3rd_party_modules()
for item in os.listdir(main_file_dir):
full_path = os.path.join(main_file_dir, item)
if item.endswith(".py") and item[:-3] in library_modules:
if is_same_path(full_path, main_file_path):
prelude = "Your program file is named '%s'." % item
rename_hint = " (*File → Rename…* )"
else:
prelude = (
"Your working directory `%s <%s>`__ contains a file named '%s'.\n\n"
% (rst_utils.escape(main_file_dir), rst_utils.escape(main_file_dir), item)
)
rename_hint = ""
yield {
"filename": full_path,
"lineno": 0,
"symbol": "file-shadows-library-module",
"msg": "Possibly bad file name",
"explanation_rst": prelude
+ "\n\n"
+ "When you try to import library module ``%s``, your file will be imported instead.\n\n"
% item[:-3]
+ "Rename your '%s'%s to make the library module visible again."
% (item, rename_hint),
"group": "warnings",
"relevance": 5,
}
def _get_3rd_party_modules(self):
proxy = get_runner().get_backend_proxy()
if not isinstance(proxy, CPythonProxy):
return []
try:
sys_path = proxy.get_sys_path()
except Exception:
logging.exception("Can't get sys path from proxy")
return []
module_names = set()
for item in sys_path:
if os.path.isdir(item) and ("site-packages" in item or "dist-packages" in item):
module_names.update(self._get_module_names(item))
return module_names
def _get_module_names(self, dir_path):
result = set()
for name in os.listdir(dir_path):
if "-" not in name:
result.add(name.replace(".py", ""))
return result
def load_plugin():
add_program_analyzer(ProgramNamingAnalyzer)
| 20.915825 | 109 | 0.506439 |
4a1fe1cdd5a05698d7597e42d78a64f0ba1a20da | 19,877 | py | Python | code/environment.py | caipeide/drift_drl | 9f3a125f76ac2c0415014a761a514a511553322c | [
"MIT"
] | 45 | 2020-05-25T06:56:29.000Z | 2022-02-20T02:29:23.000Z | code/environment.py | caipeide/drift_drl | 9f3a125f76ac2c0415014a761a514a511553322c | [
"MIT"
] | 6 | 2021-03-07T04:34:24.000Z | 2021-11-06T04:11:43.000Z | code/environment.py | caipeide/drift_drl | 9f3a125f76ac2c0415014a761a514a511553322c | [
"MIT"
] | 9 | 2020-07-21T04:55:37.000Z | 2021-12-24T07:46:13.000Z |
from __future__ import print_function
import time
import collections
import datetime
import glob
import logging
import math
import os
import random
import re
import sys
import weakref
try:
import pygame
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError(
'cannot import numpy, make sure numpy package is installed')
import carla
from carla import ColorConverter as cc
from agents.navigation.roaming_agent import RoamingAgent
from agents.navigation.basic_agent import BasicAgent
from carla_tools import *
import argparse
from collections import deque
import pandas as pd
step_T_bound = (0.6,1) # Boundary of throttle values
step_S_bound = (-0.8,0.8) # Boundary of the steering angle values
def draw_waypoints(world, route):
x0 = route[0,0]
y0 = route[0,1]
for k in range(1,route.shape[0]):
r = route[k,:]
x1 = r[0]
y1 = r[1]
dx = x1-x0
dy = y1-y0
if math.sqrt(dx*dx+dy*dy) > 30: # original 2.5
x0 = x1
y0 = y1
begin = carla.Location(x = x1,y = y1, z = 0.2)
angle = math.radians(r[2])
end = begin + carla.Location(x=6*math.cos(angle), y=6*math.sin(angle))
world.debug.draw_arrow(begin, end, arrow_size=12,life_time=90, color=carla.Color(238,18, 137,0))
class environment():
def __init__(self, throttleSize=4, steerSize=9, traj_num = 0, collectFlag = False, model='dqn', vehicleNum=1):
log_level = logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('listening to server %s:%s', '127.0.0.1', 2000)
        self.refreshRoute(traj_num, vehicleNum)  # load the reference trajectory (x, y, yaw, vx, vy, slip, yaw-rate) from CSV
self.vehicleNum = vehicleNum
if not collectFlag:
start_location = carla.Location(x = self.route[0,0], y = self.route[0,1], z = 0.1)
start_rotation = carla.Rotation(pitch = 0, yaw = self.route[0,2], roll = 0)
else:
start_location = carla.Location()
start_rotation = carla.Rotation()
self.start_point = carla.Transform(location = start_location, rotation = start_rotation) # type : Transform (location, rotation)
self.client = carla.Client('127.0.0.1', 2000)
self.client.set_timeout(4.0)
self.display = pygame.display.set_mode((1280, 720),pygame.HWSURFACE | pygame.DOUBLEBUF)
self.hud = HUD(1280, 720)
self.world = World(self.client.get_world(), self.hud, 'vehicle.*', self.start_point, vehicleNum)
self.clock = pygame.time.Clock()
self.minDis = 0
self.collectFlag = collectFlag
self.traj_drawn_list = []
self.control = carla.VehicleControl(
throttle = 1,
steer = 0.0,
brake = 0.0,
hand_brake = False,
reverse = False,
manual_gear_shift = False,
gear = 0)
self.destinationFlag = False
self.away = False
self.collisionFlag = False
self.waypoints_ahead = []
self.waypoints_neighbor = []
self.steer_history = deque(maxlen=20)
self.throttle_history = deque(maxlen=20)
self.velocity_local = []
self.model = model
if model == 'dqn':
self.step_T_pool = [step_T_bound[0]]
self.step_S_pool = [step_S_bound[0]]
t_step_rate = (step_T_bound[1]- step_T_bound[0])/throttleSize
s_step_rate = (step_S_bound[1]- step_S_bound[0])/steerSize
for i in range(throttleSize):
self.step_T_pool.append(self.step_T_pool[-1]+t_step_rate)
for i in range(steerSize):
self.step_S_pool.append(self.step_S_pool[-1]+s_step_rate)
print(self.step_T_pool)
print(self.step_S_pool)
self.tStateNum = len(self.step_T_pool)
self.sStateNum = len(self.step_S_pool)
self.e_heading = 0
self.e_d_heading = 0
self.e_dis = 0
self.e_d_dis = 0
self.e_slip = 0
self.e_d_slip = 0
self.e_vx = 0
self.e_d_vx = 0
self.e_vy = 0
self.e_d_vy = 0
self.tg = 0
        self.clock_history = 0 # append the current location to self.waypoints_history every 0.2s
self.k_heading = 0.1
self.waypoints_ahead_local = []
self.waypoints_history = deque(maxlen=5)
self.waypoints_history_local = []
self.last_steer = 0.0
self.last_throttle = 0.0
self.tire_friction_array = np.arange(3,4.1,0.1) # [3,4], 11D
self.mass_array = np.arange(1700,1910,50) # array([1700, 1750, 1800, 1850, 1900])
self.ori_physics_control = self.world.player.get_physics_control()
self.wheel_fl = self.ori_physics_control.wheels[0]
self.wheel_fr = self.ori_physics_control.wheels[1]
self.wheel_rl = self.ori_physics_control.wheels[2]
self.wheel_rr = self.ori_physics_control.wheels[3]
self.world.world.set_weather(carla.WeatherParameters.ClearNoon)
def refreshRoute(self, traj_num, vehicleNum):
if vehicleNum == 1:
traj = pd.read_csv('ref_trajectory/traj_' + str(traj_num) + '.csv')
else:
traj = pd.read_csv('ref_trajectory/traj_different_vehicles/' + str(vehicleNum) + '.csv')
self.route = traj.values
self.route_x = self.route[:,0]
self.route_y = self.route[:,1]
self.route_length = np.zeros(self.route.shape[0])
for i in range(1, self.route.shape[0]):
dx = self.route_x[i-1] - self.route_x[i]
dy = self.route_y[i-1] - self.route_y[i]
self.route_length[i] = self.route_length[i-1] + np.sqrt(dx * dx + dy * dy)
def step(self, actionID = 4, steer = 0, throttle=0, manual_control = False):
# apply the computed control commands, update endFlag and return state/reward
if not manual_control:
if self.model == 'dqn':
self.control = self.getAction(actionID=actionID)
else:
self.control = self.getAction(steer = steer,throttle = throttle)
if self.model == 'sac':
self.control.steer = 0.1*self.control.steer + 0.9*self.last_steer
self.control.throttle = 0.3*self.control.throttle + 0.7*self.last_throttle
if self.model == 'ddpg':
self.control.steer = 0.6*self.control.steer + 0.4*self.last_steer
self.control.throttle = 0.3*self.control.throttle + 0.7*self.last_throttle
self.last_steer = self.control.steer
self.last_throttle = self.control.throttle
self.world.player.apply_control(self.control)
self.steer_history.append(self.control.steer)
self.throttle_history.append(self.control.throttle)
time.sleep(0.05)
if manual_control and not self.collectFlag:
control = self.world.player.get_control()
self.steer_history.append(control.steer)
self.throttle_history.append(control.throttle)
time.sleep(0.05)
newState = self.getState()
if not self.collectFlag :
reward = self.getReward(newState, self.steer_history, self.throttle_history)
self.collisionFlag = self.collisionDetect()
return newState, reward, self.collisionFlag, self.destinationFlag, self.away, self.control
else:
control = self.world.player.get_control()
return newState, control
def reset(self, traj_num = 0, collect_x = 0, collect_y = 0, collect_yaw = 0, randomPosition = False, testFlag = False,
test_friction = 3.5, test_mass = 1800.0, differentFriction=False, differentVehicles=False):
# random change the tire friction and vehicle mass:
if not testFlag:
index_friction = np.random.randint(0,self.tire_friction_array.shape[0])
index_mass = np.random.randint(0,self.mass_array.shape[0])
self.tire_friction = self.tire_friction_array[index_friction]
self.mass = self.mass_array[index_mass]
else:
self.tire_friction = test_friction
self.mass = test_mass
if not differentFriction:
self.wheel_fl.tire_friction = self.tire_friction
self.wheel_fr.tire_friction = self.tire_friction
self.wheel_rl.tire_friction = self.tire_friction
self.wheel_rr.tire_friction = self.tire_friction
else:
self.wheel_fl.tire_friction = 2.8
self.wheel_fr.tire_friction = 2.8
self.wheel_rl.tire_friction = 4.2
self.wheel_rr.tire_friction = 4.2
wheels = [self.wheel_fl, self.wheel_fr, self.wheel_rl, self.wheel_rr]
self.ori_physics_control.wheels = wheels
if not differentVehicles:
self.ori_physics_control.mass = float(self.mass)
self.world.player.apply_physics_control(self.ori_physics_control)
time.sleep(0.5)
# detect:
physics = self.world.player.get_physics_control()
        print('friction: {}, mass: {}'.format(physics.wheels[0].tire_friction, physics.mass))
print('center of mass: ', physics.center_of_mass.x, physics.center_of_mass.y, physics.center_of_mass.z)
if not self.collectFlag:
self.refreshRoute(traj_num, self.vehicleNum)
if not randomPosition:
start_location = carla.Location(x = self.route[0,0], y = self.route[0,1], z = 0.1)
start_rotation = carla.Rotation(pitch = 0, yaw = self.route[0,2], roll = 0)
                velocity_local = [10, 0]  # initial forward speed: 10 m/s
angular_velocity = carla.Vector3D()
else:
k = np.random.randint(0,self.route.shape[0] - 100)
start_location = carla.Location(x = self.route[k,0], y = self.route[k,1], z = 0.1)
start_rotation = carla.Rotation(pitch = 0, yaw = self.route[k,2], roll = 0)
velocity_local = [10, 0]
# angular_velocity = carla.Vector3D(z = self.route[k,6])
angular_velocity = carla.Vector3D()
else:
start_location = carla.Location(x = collect_x, y=collect_y)
start_rotation = carla.Rotation(yaw = collect_yaw)
self.start_point = carla.Transform(location = start_location, rotation = start_rotation) # type : Transform (location, rotation)
ego_yaw = self.start_point.rotation.yaw
if not self.collectFlag:
if traj_num not in self.traj_drawn_list:
self.drawPoints()
self.traj_drawn_list.append(traj_num)
ego_yaw = ego_yaw/180.0 * 3.141592653
transformed_world_velocity = self.velocity_local2world(velocity_local, ego_yaw)
self.world.player.set_transform(self.start_point)
self.world.player.set_velocity(transformed_world_velocity)
self.world.player.set_angular_velocity(angular_velocity)
self.world.player.apply_control(carla.VehicleControl())
self.world.collision_sensor.history = []
self.away = False
self.endFlag = False
self.steer_history.clear()
self.throttle_history.clear()
self.waypoints_neighbor = []
self.waypoints_ahead = []
self.waypoints_ahead_local = [] # carla.location 10pts
self.waypoints_history.clear() # carla.location 5pts
self.waypoints_history_local = []
self.destinationFlag = False
self.last_steer = 0.0
self.last_throttle = 0.0
self.drived_distance = 0
print('RESET!\n\n')
return 0
def getState(self):
location = self.world.player.get_location()
angular_velocity = self.world.player.get_angular_velocity()
transform = self.world.player.get_transform()
ego_yaw = transform.rotation.yaw
if ego_yaw < 0:
ego_yaw += 360
if ego_yaw > 360:
ego_yaw -= 360
ego_yaw = ego_yaw/180.0 * 3.141592653
self.getNearby() # will update self.minDis
self.getLocalHistoryWay(location, ego_yaw)
self.getLocalFutureWay(location, ego_yaw)
self.velocity_world2local(ego_yaw) # will update self.velocity_local
ego_yaw = ego_yaw/3.141592653 * 180
if ego_yaw > 180:
ego_yaw = -(360-ego_yaw)
if self.collectFlag:
state = [location.x, location.y, ego_yaw, self.velocity_local[0], self.velocity_local[1], self.velocity_local[2], angular_velocity.z]
self.control = self.world.player.get_control()
steer = self.control.steer
ct = time.time()
if ct - self.clock_history > 0.3:
self.waypoints_history.append(np.array([location.x, location.y, steer, self.velocity_local[2]]))
self.clock_history = ct
return state
else:
dt = time.time() - self.tg
self.e_d_dis = (self.minDis - self.e_dis) / dt
self.e_dis = self.minDis
if self.e_dis > 15:
self.away = True
# error of heading:
# 1. calculate the abs
way_yaw = self.waypoints_ahead[0,2]
# 2. update the way_yaw based on vector guidance field:
vgf_left = self.vgf_direction(location)
            # 3. if the vehicle is on the left of the nearest waypoint, correct way_yaw using the waypoint's heading
if vgf_left:
way_yaw = math.atan(self.k_heading * self.e_dis)/3.141592653*180 + way_yaw
else:
way_yaw = -math.atan(self.k_heading * self.e_dis)/3.141592653*180 + way_yaw
if way_yaw > 180:
way_yaw = -(360-way_yaw)
if way_yaw < -180:
way_yaw += 360
if ego_yaw*way_yaw > 0:
e_heading = abs(ego_yaw - way_yaw)
else:
e_heading = abs(ego_yaw) + abs(way_yaw)
if e_heading > 180:
e_heading = 360 - e_heading
# considering the +-:
# waypoint to the vehicle, if clockwise, then +
hflag = 1
if ego_yaw*way_yaw > 0:
if ego_yaw > 0:
if abs(way_yaw) < abs(ego_yaw):
hflag = -1
else:
hflag = 1
if ego_yaw < 0:
if abs(way_yaw) < abs(ego_yaw):
hflag = 1
else:
hflag = -1
else:
if ego_yaw > 0:
t_yaw = ego_yaw-180
if way_yaw > t_yaw:
hflag = -1
else:
hflag = 1
else:
t_yaw = ego_yaw + 180
if way_yaw > t_yaw:
hflag = -1
else:
hflag = 1
e_heading = e_heading * hflag
if e_heading * self.e_heading > 0:
if e_heading > 0:
self.e_d_heading = (e_heading - self.e_heading)/dt
else:
self.e_d_heading = -(e_heading - self.e_heading)/dt
else:
self.e_d_heading = (abs(e_heading) - abs(self.e_heading)) / dt
self.e_heading = e_heading
e_slip = self.velocity_local[2] - self.waypoints_ahead[0,5]
self.e_d_slip = (e_slip - self.e_slip)/dt
self.e_slip = e_slip
e_vx = self.velocity_local[0] - self.waypoints_ahead[0,3]
self.e_d_vx = (e_vx - self.e_vx)/dt
self.e_vx = e_vx
e_vy = self.velocity_local[1] - self.waypoints_ahead[0,4]
self.e_d_vy = (e_vy - self.e_vy)/dt
self.e_vy = e_vy
self.control = self.world.player.get_control()
steer = self.control.steer
throttle = self.control.throttle
ct = time.time()
if ct - self.clock_history > 0.2:
self.waypoints_history.append(np.array([location.x, location.y, steer, self.velocity_local[2]]))
self.clock_history = ct
vx = self.velocity_local[0]
vy = self.velocity_local[1]
e_d_slip = self.e_d_slip
if math.sqrt(vx*vx + vy*vy) < 2: # if the speed is too small we ignore the error of slip angle
e_slip = 0
e_d_slip = 0
state = [steer, throttle , self.e_dis, self.e_d_dis, self.e_heading, self.e_d_heading, e_slip, e_d_slip,
self.e_vx, self.e_d_vx, self.e_vy, self.e_d_vy]
state.extend([k[0] for k in self.waypoints_ahead_local]) #x
state.extend([k[1] for k in self.waypoints_ahead_local]) #y
state.extend([k[2] for k in self.waypoints_ahead_local]) #slip
self.tg = time.time()
return state
def getReward(self, state, steer_history, throttle_history):
e_dis = state[2]
e_slip = state[6]
e_heading = state[4]
std_steer = np.array(steer_history)
std_steer = std_steer.std()
std_throttle = np.array(throttle_history)
std_throttle = std_throttle.std()
r_dis = np.exp(-0.5*e_dis)
if abs(e_heading)<90:
r_heading = np.exp(-0.1*abs(e_heading))
elif (e_heading)>= 90:
r_heading = -np.exp(-0.1*(180-e_heading))
else:
r_heading = -np.exp(-0.1*(e_heading+180))
if abs(e_slip)<90:
r_slip = np.exp(-0.1*abs(e_slip))
elif (e_slip)>= 90:
r_slip = -np.exp(-0.1*(180-e_slip))
else:
r_slip = -np.exp(-0.1*(e_slip+180))
r_std_steer = np.exp(-2*std_steer)
r_std_throttle = np.exp(-2*std_throttle)
vx = self.velocity_local[0]
vy = self.velocity_local[1]
v = math.sqrt(vx*vx + vy*vy)
reward = v*(40*r_dis + 40*r_heading + 20*r_slip)
if v < 6:
reward = reward / 2
return reward
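    # In summary (restating the code above): reward = v * (40*r_dis +
    # 40*r_heading + 20*r_slip), where each r_* term decays exponentially with
    # its tracking error, and the total is halved below 6 m/s so the agent only
    # profits from accurate tracking while actually moving at speed.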
def getNearby(self):
self.waypoints_ahead = []
self.waypoints_neighbor = []
egoLocation = self.world.player.get_location()
dx_array = self.route_x - egoLocation.x
dy_array = self.route_y - egoLocation.y
dis_array = np.sqrt(dx_array * dx_array + dy_array * dy_array)
self.minDis = np.amin(dis_array)
        index = int(np.argmin(dis_array))  # index of the waypoint closest to the vehicle
self.drived_distance = self.route_length[index]
self.waypoints_ahead = self.route[index:,:]
if index >= 20:
index_st = index - 20
else:
index_st = 0
self.waypoints_neighbor = self.route[index_st:,:]
self.traj_index = index
def drawPoints(self):
draw_waypoints(self.world.player.get_world(), self.route)
def render(self):
        # render the CARLA client window via pygame
self.world.tick(self.clock, self.e_dis, self.e_heading, self.velocity_local[2] )
self.world.render(self.display)
pygame.display.flip()
def velocity_world2local(self,yaw):
velocity_world = self.world.player.get_velocity()
vx = velocity_world.x
vy = velocity_world.y
yaw = -yaw
local_x = float(vx * math.cos(yaw) - vy * math.sin(yaw))
local_y = float(vy * math.cos(yaw) + vx * math.sin(yaw))
if local_x != 0:
slip_angle = math.atan(local_y/local_x)/3.1415926*180
else:
slip_angle = 0
self.velocity_local = [local_x,local_y,slip_angle]
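    # Sanity check of the rotation above (illustrative): with yaw = pi/2 (the
    # car pointing along world +y) and world velocity (vx, vy) = (0, 10), the
    # formulas give local velocity (10, 0) and slip angle 0 -- pure forward
    # motion, as expected.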
def velocity_local2world(self, velocity_local, yaw):
vx = velocity_local[0]
vy = velocity_local[1]
world_x = vx * math.cos(yaw) - vy * math.sin(yaw)
world_y = vy * math.cos(yaw) + vx * math.sin(yaw)
return carla.Vector3D(world_x,world_y,0)
def collisionDetect(self):
if self.world.collision_sensor.history:
return True
else:
return False
def getAction(self,actionID=4,steer=0, throttle=0):
if self.model == 'dqn':
throttleID = int(actionID / self.sStateNum)
steerID = int(actionID % self.sStateNum)
self.control = carla.VehicleControl(
throttle = self.step_T_pool[throttleID],
steer = self.step_S_pool[steerID],
brake = 0.0,
hand_brake = False,
reverse = False,
manual_gear_shift = False,
gear = 0)
else:
self.control = carla.VehicleControl(
throttle = throttle,
steer = steer,
brake = 0.0,
hand_brake = False,
reverse = False,
manual_gear_shift = False,
gear = 0)
return self.control
def coordinateTransform(self,egoLocation,yaw):
# transfer the nearest waypoint to the local coordinate.
way_x = self.waypoints_ahead[0,0]
way_y = self.waypoints_ahead[0,1]
yaw = -yaw
dx = way_x - egoLocation.x
dy = way_y - egoLocation.y
nx = dx * math.cos(yaw) - dy * math.sin(yaw)
ny = dy * math.cos(yaw) + dx * math.sin(yaw)
if nx > 0 and ny > 0:
return 1
elif nx> 0 and ny < 0:
return 2
elif nx<0 and ny < 0:
return 3
elif nx<0 and ny>0:
return 4
def getLocalFutureWay(self,egoLocation,yaw):
# transfer the future waypoints (#10) to the local coordinate.
# x, y, slip (degree)
ways = self.waypoints_ahead[0:-1:5,:] # filter to 1m between way pts
if ways.shape[0] < 11:
self.destinationFlag = True
self.waypoints_ahead_local = []
yaw = -yaw
for w in ways[0:10]:
wx = w[0]
wy = w[1]
w_slip = w[5]
dx = wx - egoLocation.x
dy = wy - egoLocation.y
nx = dx * math.cos(yaw) - dy * math.sin(yaw)
ny = dy * math.cos(yaw) + dx * math.sin(yaw)
self.waypoints_ahead_local.append(np.array([nx,ny,w_slip]))
def getLocalHistoryWay(self,egoLocation,yaw):
# x, y, steer, slip (degree)
ways = self.waypoints_history
yaw = -yaw
self.waypoints_history_local = []
if len(ways) < 5:
for i in range(5 - len(ways)):
self.waypoints_history_local.append(np.array([0,0,0,0]))
for w in ways:
wx = w[0]
wy = w[1]
w_steer = w[2]
w_slip = w[3]
dx = wx - egoLocation.x
dy = wy - egoLocation.y
nx = dx * math.cos(yaw) - dy * math.sin(yaw)
ny = dy * math.cos(yaw) + dx * math.sin(yaw)
self.waypoints_history_local.append(np.array([nx,ny,w_steer,w_slip]))
def vgf_direction(self,egoLocation):
way_x = self.waypoints_ahead[0,0]
way_y = self.waypoints_ahead[0,1]
yaw = -self.waypoints_ahead[0,2]/180.0 * 3.141592653
dx = egoLocation.x - way_x
dy = egoLocation.y - way_y
nx = dx * math.cos(yaw) - dy * math.sin(yaw)
ny = dy * math.cos(yaw) + dx * math.sin(yaw)
if ny < 0:
return True
else:
return False
| 29.755988 | 136 | 0.687478 |
4a1fe3860432b735234ddd4ad45ca1cc532908ed | 8,184 | py | Python | pymongo/server_description.py | nloadholtes/mongo-python-driver | 2818a32855a53799b58343bff0a46c5227057b19 | [
"Apache-2.0"
] | 9 | 2021-02-15T05:53:17.000Z | 2022-02-25T01:47:09.000Z | pymongo/server_description.py | nloadholtes/mongo-python-driver | 2818a32855a53799b58343bff0a46c5227057b19 | [
"Apache-2.0"
] | 5 | 2022-02-04T13:00:12.000Z | 2022-02-24T18:13:24.000Z | pymongo/server_description.py | nloadholtes/mongo-python-driver | 2818a32855a53799b58343bff0a46c5227057b19 | [
"Apache-2.0"
] | 7 | 2022-02-05T20:29:14.000Z | 2022-03-26T13:16:44.000Z | # Copyright 2014-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represent one server the driver is connected to."""
from bson import EPOCH_NAIVE
from pymongo.server_type import SERVER_TYPE
from pymongo.ismaster import IsMaster
from pymongo.monotonic import time as _time
class ServerDescription(object):
"""Immutable representation of one server.
:Parameters:
- `address`: A (host, port) pair
- `ismaster`: Optional IsMaster instance
- `round_trip_time`: Optional float
- `error`: Optional, the last error attempting to connect to the server
"""
__slots__ = (
'_address', '_server_type', '_all_hosts', '_tags', '_replica_set_name',
'_primary', '_max_bson_size', '_max_message_size',
'_max_write_batch_size', '_min_wire_version', '_max_wire_version',
'_round_trip_time', '_me', '_is_writable', '_is_readable',
'_ls_timeout_minutes', '_error', '_set_version', '_election_id',
'_cluster_time', '_last_write_date', '_last_update_time',
'_topology_version')
def __init__(
self,
address,
ismaster=None,
round_trip_time=None,
error=None):
self._address = address
if not ismaster:
ismaster = IsMaster({})
self._server_type = ismaster.server_type
self._all_hosts = ismaster.all_hosts
self._tags = ismaster.tags
self._replica_set_name = ismaster.replica_set_name
self._primary = ismaster.primary
self._max_bson_size = ismaster.max_bson_size
self._max_message_size = ismaster.max_message_size
self._max_write_batch_size = ismaster.max_write_batch_size
self._min_wire_version = ismaster.min_wire_version
self._max_wire_version = ismaster.max_wire_version
self._set_version = ismaster.set_version
self._election_id = ismaster.election_id
self._cluster_time = ismaster.cluster_time
self._is_writable = ismaster.is_writable
self._is_readable = ismaster.is_readable
self._ls_timeout_minutes = ismaster.logical_session_timeout_minutes
self._round_trip_time = round_trip_time
self._me = ismaster.me
self._last_update_time = _time()
self._error = error
self._topology_version = ismaster.topology_version
if error:
if hasattr(error, 'details') and isinstance(error.details, dict):
self._topology_version = error.details.get('topologyVersion')
if ismaster.last_write_date:
# Convert from datetime to seconds.
delta = ismaster.last_write_date - EPOCH_NAIVE
self._last_write_date = delta.total_seconds()
else:
self._last_write_date = None
@property
def address(self):
"""The address (host, port) of this server."""
return self._address
@property
def server_type(self):
"""The type of this server."""
return self._server_type
@property
def server_type_name(self):
"""The server type as a human readable string.
.. versionadded:: 3.4
"""
return SERVER_TYPE._fields[self._server_type]
@property
def all_hosts(self):
"""List of hosts, passives, and arbiters known to this server."""
return self._all_hosts
@property
def tags(self):
return self._tags
@property
def replica_set_name(self):
"""Replica set name or None."""
return self._replica_set_name
@property
def primary(self):
"""This server's opinion about who the primary is, or None."""
return self._primary
@property
def max_bson_size(self):
return self._max_bson_size
@property
def max_message_size(self):
return self._max_message_size
@property
def max_write_batch_size(self):
return self._max_write_batch_size
@property
def min_wire_version(self):
return self._min_wire_version
@property
def max_wire_version(self):
return self._max_wire_version
@property
def set_version(self):
return self._set_version
@property
def election_id(self):
return self._election_id
@property
def cluster_time(self):
return self._cluster_time
@property
def election_tuple(self):
return self._set_version, self._election_id
@property
def me(self):
return self._me
@property
def logical_session_timeout_minutes(self):
return self._ls_timeout_minutes
@property
def last_write_date(self):
return self._last_write_date
@property
def last_update_time(self):
return self._last_update_time
@property
def round_trip_time(self):
"""The current average latency or None."""
# This override is for unittesting only!
if self._address in self._host_to_round_trip_time:
return self._host_to_round_trip_time[self._address]
return self._round_trip_time
@property
def error(self):
"""The last error attempting to connect to the server, or None."""
return self._error
@property
def is_writable(self):
return self._is_writable
@property
def is_readable(self):
return self._is_readable
@property
def mongos(self):
return self._server_type == SERVER_TYPE.Mongos
@property
def is_server_type_known(self):
return self.server_type != SERVER_TYPE.Unknown
@property
def retryable_writes_supported(self):
"""Checks if this server supports retryable writes."""
return (
self._ls_timeout_minutes is not None and
self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary))
@property
def retryable_reads_supported(self):
"""Checks if this server supports retryable writes."""
return self._max_wire_version >= 6
@property
def topology_version(self):
return self._topology_version
def to_unknown(self, error=None):
unknown = ServerDescription(self.address, error=error)
unknown._topology_version = self.topology_version
return unknown
def __eq__(self, other):
if isinstance(other, ServerDescription):
return ((self._address == other.address) and
(self._server_type == other.server_type) and
(self._min_wire_version == other.min_wire_version) and
(self._max_wire_version == other.max_wire_version) and
(self._me == other.me) and
(self._all_hosts == other.all_hosts) and
(self._tags == other.tags) and
(self._replica_set_name == other.replica_set_name) and
(self._set_version == other.set_version) and
(self._election_id == other.election_id) and
(self._primary == other.primary) and
(self._ls_timeout_minutes ==
other.logical_session_timeout_minutes) and
(self._error == other.error))
return NotImplemented
def __ne__(self, other):
return not self == other
def __repr__(self):
errmsg = ''
if self.error:
errmsg = ', error=%r' % (self.error,)
return "<%s %s server_type: %s, rtt: %s%s>" % (
self.__class__.__name__, self.address, self.server_type_name,
self.round_trip_time, errmsg)
# For unittesting only. Use under no circumstances!
_host_to_round_trip_time = {}
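# Minimal usage sketch (illustrative, not part of the module):
#
#   sd = ServerDescription(("localhost", 27017))  # no ismaster response yet
#   sd.server_type_name                           # 'Unknown'
#   sd.is_server_type_known                       # False
#   sd.to_unknown(error=exc)                      # same address and topology
#                                                 # version, with the error kept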
| 31.96875 | 79 | 0.649804 |
4a1fe38f40b1bbe57bde3fa5b5f9b5019e9f0902 | 643 | py | Python | sp_tst_button.py | jbarry1506/jcbhub | d7fa7a5a58903eed2393eaa17c89d56d2b68151b | [
"Apache-2.0"
] | null | null | null | sp_tst_button.py | jbarry1506/jcbhub | d7fa7a5a58903eed2393eaa17c89d56d2b68151b | [
"Apache-2.0"
] | null | null | null | sp_tst_button.py | jbarry1506/jcbhub | d7fa7a5a58903eed2393eaa17c89d56d2b68151b | [
"Apache-2.0"
] | null | null | null | import RPi.GPIO as GPIO
from time import sleep
# Sets pin numbering scheme to BCM
GPIO.setmode(GPIO.BCM)
# Logic output pin for the power strip (positive). Other power strip wire will go to GND.
GPIO.setup(16, GPIO.OUT)
# Input from pushbutton, using the internal pull-down resistor. Connect the other button wire to a 3.3V pin
# (not 5V -- the Pi's GPIO inputs are 3.3V-only, and 5V can damage them).
GPIO.setup(26, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
print(GPIO.input(26))
while True:
if GPIO.input(26) == 1:
print("Button Pusher! Button Pusher!")
continue
elif GPIO.input(26) == 0:
GPIO.output(16,1)
print("down")
sleep(2)
GPIO.output(16,0)
continue
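# Debounce sketch (illustrative, not in the original script). Mechanical switch
# contacts "bounce", so a single press can read as several; re-checking after a
# short delay filters that out. This variant also triggers the output on a
# press (input == 1) rather than on release:
#
# while True:
#     if GPIO.input(26) == 1:
#         sleep(0.05)                  # wait out contact bounce
#         if GPIO.input(26) == 1:      # still pressed after the bounce window
#             GPIO.output(16, 1)
#             sleep(2)
#             GPIO.output(16, 0)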
| 29.227273 | 101 | 0.671851 |
4a1fe3e2d4b012c4327325fc92e5352de8bb4465 | 7,218 | py | Python | src/testingSimulatedEyeBlinking.py | AndileNdlovu96/faceswapping-2d-image-in-3d-space- | 2ba0fbafeb3ec41ca6ef27ef5aaab67fc117c7e0 | [
"MIT"
] | 2 | 2021-06-14T06:15:22.000Z | 2021-11-01T02:59:35.000Z | src/testingSimulatedEyeBlinking.py | AndileNdlovu96/faceswapping-2d-image-in-3d-space- | 2ba0fbafeb3ec41ca6ef27ef5aaab67fc117c7e0 | [
"MIT"
] | null | null | null | src/testingSimulatedEyeBlinking.py | AndileNdlovu96/faceswapping-2d-image-in-3d-space- | 2ba0fbafeb3ec41ca6ef27ef5aaab67fc117c7e0 | [
"MIT"
] | null | null | null | import dlib
import cv2
import numpy as np
from dlib import rectangle
predictorPath = "../faceModels/shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictorPath)
def extractFacialLandmarks(faceROI, img, imgScale, predictor):
upscaledFaceROI = rectangle(int(faceROI.left() / imgScale), int(faceROI.top() / imgScale),
int(faceROI.right() / imgScale), int(faceROI.bottom() / imgScale))
# predict facial landmark points
facialLandmarks = predictor(img, upscaledFaceROI)
# make an array of the landmark points with 68 (x,y) coordinates
facialLandmarkCoords = np.array([[p.x, p.y] for p in facialLandmarks.parts()])
    # transpose the landmark points so that we work with a 2xN rather than an Nx2 array;
    # keeping x's and y's in separate rows makes the later calculations easier
return facialLandmarkCoords.T
def downScaleImg(img, imgScale, maxImgSizeForDetection):
scaledImg = img
if max(img.shape) > maxImgSizeForDetection:
imgScale = maxImgSizeForDetection / float(max(img.shape))
scaledImg = cv2.resize(img, (int(img.shape[1] * imgScale), int(img.shape[0] * imgScale)))
return scaledImg, imgScale
def getFacialLandmarks(textureImage, detector, predictor, maxImgSizeForDetection=640):
imgScale = 1
downScaledImg, imgScale = downScaleImg(textureImage, imgScale, maxImgSizeForDetection)
# detect face on smaller image (much faster)
detectedFacesROI = detector(downScaledImg, 1)
# return nothing if no faces are found
if len(detectedFacesROI) == 0:
return None
# list of facial landmarks for each face in the mapped image
facialLandmarksList = []
for faceROI in detectedFacesROI:
facialLandmarks = extractFacialLandmarks(faceROI, textureImage, imgScale, predictor)
facialLandmarksList.append(facialLandmarks)
# return list of faces
return facialLandmarksList
def reshape_for_polyline(array):
    # let numpy infer the outer dimension; result is an (N, 1, 2) int32 array
return np.array(array, np.int32).reshape((-1, 1, 2))
def drawImposterLandmarks(frame, landmarks, black_image):
#black_image = np.zeros(frame.shape, np.uint8)
landmarks = landmarks.T
jaw = reshape_for_polyline(landmarks[0:17])
left_eyebrow = reshape_for_polyline(landmarks[22:27])
right_eyebrow = reshape_for_polyline(landmarks[17:22])
nose_bridge = reshape_for_polyline(landmarks[27:31])
lower_nose = reshape_for_polyline(landmarks[30:35])
left_eye = reshape_for_polyline(landmarks[42:48])
right_eye = reshape_for_polyline(landmarks[36:42])
outer_lip = reshape_for_polyline(landmarks[48:60])
inner_lip = reshape_for_polyline(landmarks[60:68])
color = (255, 255, 255)
thickness = 3
cv2.polylines(black_image, [jaw], False, color, thickness)
cv2.polylines(black_image, [left_eyebrow], False, color, thickness)
cv2.polylines(black_image, [right_eyebrow], False, color, thickness)
cv2.polylines(black_image, [nose_bridge], False, color, thickness)
cv2.polylines(black_image, [lower_nose], True, color, thickness)
cv2.polylines(black_image, [left_eye], True, color, thickness)
cv2.polylines(black_image, [right_eye], True, color, thickness)
cv2.polylines(black_image, [outer_lip], True, color, thickness)
cv2.polylines(black_image, [inner_lip], True, color, thickness)
return black_image
cap = cv2.VideoCapture(0)
while True:
image = cap.read()[1]
image = cv2.flip(image, 1)
faces = getFacialLandmarks(image, detector, predictor, 320)
black_image = np.zeros(image.shape, np.uint8)
another_black_image = np.zeros(image.shape, np.uint8)
if faces is not None:
for facialLandmarks2D in faces:
# draw the landmarks of all the faces detected from the source frame
black_image = drawImposterLandmarks(image, facialLandmarks2D, black_image)
left_eye = facialLandmarks2D.T[42:48]
center_of_left_eye = (int(1 / 2 * (np.max(left_eye[:, 0]) + np.min(left_eye[:, 0]))),
int(1 / 2 * (np.max(left_eye[:, 1]) + np.min(left_eye[:, 1]))))
right_eye = facialLandmarks2D.T[36:42]
center_of_right_eye = (int(1 / 2 * (np.max(right_eye[:, 0]) + np.min(right_eye[:, 0]))),
int(1 / 2 * (np.max(right_eye[:, 1]) + np.min(right_eye[:, 1]))))
outer_lip = facialLandmarks2D.T[48:60]
center_of_mouth = (int(1 / 2 * (np.max(outer_lip[:, 0]) + np.min(outer_lip[:, 0]))),
int(1 / 2 * (np.max(outer_lip[:, 1]) + np.min(outer_lip[:, 1]))))
nose_ridge = facialLandmarks2D.T[27:31]
center_of_nose_ridge = (int(1 / 2 * (np.max(nose_ridge[:, 0]) + np.min(nose_ridge[:, 0]))),
int(1 / 2 * (np.max(nose_ridge[:, 1]) + np.min(nose_ridge[:, 1]))))
lower_nose = facialLandmarks2D.T[30:35]
center_of_lower_nose = (int(1 / 2 * (np.max(lower_nose[:, 0]) + np.min(lower_nose[:, 0]))),
int(1 / 2 * (np.max(lower_nose[:, 1]) + np.min(lower_nose[:, 1]))))
center_of_nose = ((center_of_lower_nose[0] + center_of_nose_ridge[0]) // 2,
(center_of_lower_nose[1] + center_of_nose_ridge[1]) // 2)
left_eye_OutLine = cv2.convexHull(left_eye)
right_eye_OutLine = cv2.convexHull(right_eye)
face_Outline = cv2.convexHull(facialLandmarks2D.T)
cv2.fillConvexPoly(another_black_image, face_Outline, (255, 255, 255))
masked_face = cv2.bitwise_and(image, another_black_image)
masked_face_copy = cv2.bitwise_and(image, another_black_image)
cv2.circle(masked_face, center_of_left_eye, 1, (0, 0, 255), 1)
cv2.circle(masked_face, center_of_left_eye, 20, (0, 0, 255), 1)
cv2.circle(masked_face, center_of_right_eye, 1, (0, 0, 255), 1)
cv2.circle(masked_face, center_of_right_eye, 20, (0, 0, 255), 1)
cv2.circle(masked_face, center_of_mouth, 1, (0, 0, 255), 1)
cv2.circle(masked_face, center_of_mouth, 35, (0, 0, 255), 1)
cv2.circle(masked_face, center_of_nose, 1, (0, 0, 255), 1)
cv2.circle(masked_face, center_of_nose, 35, (0, 0, 255), 1)
cv2.circle(masked_face_copy, center_of_left_eye, 20, (0, 0, 255), -1)
cv2.circle(masked_face_copy, center_of_right_eye, 20, (0, 0, 255), -1)
cv2.circle(masked_face_copy, center_of_mouth, 35, (0, 0, 255), -1)
cv2.circle(masked_face_copy, center_of_nose, 35, (0, 0, 255), -1)
cv2.imshow('try', masked_face)
cv2.imshow('try try', masked_face_copy)
cv2.imshow('something', image)
cv2.imshow('landmarks', black_image)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows() | 46.87013 | 103 | 0.656137 |
4a1fe49cc44947da06e2df0dba0e2905f7ef2553 | 39,846 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_dlp_fp_doc_source.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_dlp_fp_doc_source.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_dlp_fp_doc_source.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_dlp_fp_doc_source
short_description: Create a DLP fingerprint database by allowing the FortiGate to access a file server containing files from which to create fingerprints in
Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify dlp feature and fp_doc_source category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
            - Delimited by a slash character if there is more than one attribute.
- Parameter marked with member_path is legitimate for doing member operation.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
dlp_fp_doc_source:
description:
- Create a DLP fingerprint database by allowing the FortiGate to access a file server containing files from which to create fingerprints.
default: null
type: dict
suboptions:
date:
description:
- Day of the month on which to scan the server (1 - 31).
type: int
file_path:
description:
- Path on the server to the fingerprint files (max 119 characters).
type: str
file_pattern:
description:
- Files matching this pattern on the server are fingerprinted. Optionally use the * and ? wildcards.
type: str
keep_modified:
description:
- Enable so that when a file is changed on the server the FortiGate keeps the old fingerprint and adds a new fingerprint to the database.
type: str
choices:
- enable
- disable
name:
description:
- Name of the DLP fingerprint database.
required: true
type: str
password:
description:
- Password required to log into the file server.
type: str
period:
description:
- Frequency for which the FortiGate checks the server for new or changed files.
type: str
choices:
- none
- daily
- weekly
- monthly
remove_deleted:
description:
- Enable to keep the fingerprint database up to date when a file is deleted from the server.
type: str
choices:
- enable
- disable
scan_on_creation:
description:
- Enable to keep the fingerprint database up to date when a file is added or changed on the server.
type: str
choices:
- enable
- disable
scan_subdirectories:
description:
- Enable/disable scanning subdirectories to find files to create fingerprints from.
type: str
choices:
- enable
- disable
sensitivity:
description:
- Select a sensitivity or threat level for matches with this fingerprint database. Add sensitivities using fp-sensitivity. Source dlp
.fp-sensitivity.name.
type: str
server:
description:
- IPv4 or IPv6 address of the server.
type: str
server_type:
description:
- Protocol used to communicate with the file server. Currently only Samba (SMB) servers are supported.
type: str
choices:
- samba
tod_hour:
description:
- Hour of the day on which to scan the server (0 - 23).
type: int
tod_min:
description:
- Minute of the hour on which to scan the server (0 - 59).
type: int
username:
description:
- User name required to log into the file server.
type: str
vdom:
description:
- Select the VDOM that can communicate with the file server.
type: str
choices:
- mgmt
- current
weekday:
description:
- Day of the week on which to scan the server.
type: str
choices:
- sunday
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_dlp_fp_doc_source
fortios_dlp_fp_doc_source:
vdom: root
state: present
dlp_fp_doc_source:
date: 1
file_path: /
file_pattern: '*'
keep_modified: enable
name: '1'
period: none
remove_deleted: enable
scan_on_creation: enable
scan_subdirectories: enable
server: 1.1.1.1
server_type: samba
tod_hour: 1
tod_min: 0
username: sgh
vdom: mgmt
weekday: sunday
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import is_same_comparison
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import serialize
def filter_dlp_fp_doc_source_data(json):
option_list = ['date', 'file_path', 'file_pattern',
'keep_modified', 'name', 'password',
'period', 'remove_deleted', 'scan_on_creation',
'scan_subdirectories', 'sensitivity', 'server',
'server_type', 'tod_hour', 'tod_min',
'username', 'vdom', 'weekday']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
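# Illustrative sketch (not part of the generated module): given the task input
# {'name': 'db1', 'server': '1.1.1.1', 'unknown': 'x'}, the filter above keeps
# only whitelisted, non-None keys and returns {'name': 'db1', 'server': '1.1.1.1'}.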
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
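# Illustrative sketch: underscore_to_hyphen({'file_path': '/', 'tod_hour': 1})
# returns {'file-path': '/', 'tod-hour': 1}; nested dicts and lists are walked
# recursively so the payload matches the hyphenated names the FortiOS API expects.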
def dlp_fp_doc_source(data, fos, check_mode=False):
vdom = data['vdom']
state = data['state']
dlp_fp_doc_source_data = data['dlp_fp_doc_source']
filtered_data = underscore_to_hyphen(filter_dlp_fp_doc_source_data(dlp_fp_doc_source_data))
# check_mode starts from here
if check_mode:
mkey = fos.get_mkey('dlp', 'fp_doc_source', filtered_data, vdom=vdom)
current_data = fos.get('dlp', 'fp_doc_source', vdom=vdom, mkey=mkey)
is_existed = current_data and current_data.get('http_status') == 200 \
and isinstance(current_data.get('results'), list) \
and len(current_data['results']) > 0
        # if the record exists and the state is 'present', compare the current settings with the desired ones
if state == 'present' or state is True:
if mkey is None:
return False, True, filtered_data
            # if mkey exists, compare the existing record with the desired one
            # the record exists; check whether it already matches the desired settings
if is_existed:
is_same = is_same_comparison(
serialize(current_data['results'][0]), serialize(filtered_data))
return False, not is_same, filtered_data
# record does not exist
return False, True, filtered_data
if state == 'absent':
if mkey is None:
return False, False, filtered_data
if is_existed:
return False, True, filtered_data
return False, False, filtered_data
        return True, False, {'reason': 'Must provide state parameter'}
if state == "present" or state is True:
return fos.set('dlp',
'fp-doc-source',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('dlp',
'fp-doc-source',
mkey=filtered_data['name'],
vdom=vdom)
else:
fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(resp):
return 'status' in resp and resp['status'] == 'success' or \
'http_status' in resp and resp['http_status'] == 200 or \
'http_method' in resp and resp['http_method'] == "DELETE" and resp['http_status'] == 404
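# Illustrative sketch: {'status': 'success'} and {'http_status': 200} both count
# as success, and a DELETE answered with 404 is also treated as success because
# the object is already absent ('and' binds tighter than 'or' above).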
def fortios_dlp(data, fos, check_mode):
fos.do_member_operation('dlp_fp_doc_source')
if data['dlp_fp_doc_source']:
resp = dlp_fp_doc_source(data, fos, check_mode)
else:
fos._module.fail_json(msg='missing task body: %s' % ('dlp_fp_doc_source'))
if check_mode:
return resp
return not is_successful_status(resp), \
is_successful_status(resp) and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
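# Illustrative note: the triple returned above is (is_error, changed, result),
# which main() unpacks; in check mode dlp_fp_doc_source() returns the same
# shape without touching the device.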
versioned_schema = {
"type": "list",
"children": {
"username": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"remove_deleted": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"scan_subdirectories": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"server_type": {
"type": "string",
"options": [
{
"value": "samba",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"sensitivity": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tod_min": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"file_pattern": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"period": {
"type": "string",
"options": [
{
"value": "none",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "daily",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "weekly",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "monthly",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"server": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"tod_hour": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"weekday": {
"type": "string",
"options": [
{
"value": "sunday",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "monday",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "tuesday",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "wednesday",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "thursday",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "friday",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "saturday",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"keep_modified": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"scan_on_creation": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"date": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"password": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"file_path": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"vdom": {
"type": "string",
"options": [
{
"value": "mgmt",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "current",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
def main():
module_spec = schema_to_module_spec(versioned_schema)
mkeyname = 'name'
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"enable_log": {"required": False, "type": bool},
"vdom": {"required": False, "type": "str", "default": "root"},
"member_path": {"required": False, "type": "str"},
"member_state": {
"type": "str",
"required": False,
"choices": ["present", "absent"]
},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"dlp_fp_doc_source": {
"required": False, "type": "dict", "default": None,
"options": {
}
}
}
for attribute_name in module_spec['options']:
fields["dlp_fp_doc_source"]['options'][attribute_name] = module_spec['options'][attribute_name]
if mkeyname and mkeyname == attribute_name:
fields["dlp_fp_doc_source"]['options'][attribute_name]['required'] = True
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=True)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
if 'enable_log' in module.params:
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module, mkeyname)
versions_check_result = check_schema_versioning(fos, versioned_schema, "dlp_fp_doc_source")
is_error, has_changed, result = fortios_dlp(module.params, fos, module.check_mode)
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 33.371859 | 157 | 0.381393 |
4a1fe4a0856f48bcbbdf5278266f9db81b036ec0 | 14,339 | py | Python | models/account.py | rocketgithub/fel_ecofactura | f95982b0c4adb66332e1df71d7b3a403c2d34362 | [
"BSD-3-Clause"
] | null | null | null | models/account.py | rocketgithub/fel_ecofactura | f95982b0c4adb66332e1df71d7b3a403c2d34362 | [
"BSD-3-Clause"
] | null | null | null | models/account.py | rocketgithub/fel_ecofactura | f95982b0c4adb66332e1df71d7b3a403c2d34362 | [
"BSD-3-Clause"
] | 2 | 2020-06-15T16:45:21.000Z | 2020-07-03T04:07:33.000Z | # -*- encoding: utf-8 -*-
from openerp import models, fields, api, _
from openerp.exceptions import Warning
from datetime import datetime
from lxml import etree
import base64
import logging
import zeep
class AccountInvoice(models.Model):
_inherit = "account.invoice"
firma_fel = fields.Char('Firma FEL', copy=False)
serie_fel = fields.Char('Serie FEL', copy=False)
numero_fel = fields.Char('Numero FEL', copy=False)
factura_original_id = fields.Many2one('account.invoice', string="Factura original FEL")
consignatario_fel = fields.Many2one('res.partner', string="Consignatario o Destinatario FEL")
comprador_fel = fields.Many2one('res.partner', string="Comprador FEL")
exportador_fel = fields.Many2one('res.partner', string="Exportador FEL")
incoterm_fel = fields.Char(string="Incoterm FEL")
pdf_fel = fields.Binary('PDF FEL', copy=False)
pdf_fel_name = fields.Char('Nombre PDF FEL', default='pdf_fel.pdf', size=32)
documento_xml_fel = fields.Binary('Documento xml FEL', copy=False)
documento_xml_fel_name = fields.Char('Nombre doc xml FEL', default='documento_xml_fel.xml', size=32)
resultado_xml_fel = fields.Binary('Resultado xml FEL', copy=False)
resultado_xml_fel_name = fields.Char('Resultado doc xml FEL', default='resultado_xml_fel.xml', size=32)
@api.multi
def invoice_validate(self):
for factura in self:
if factura.journal_id.generar_fel and not factura.firma_fel:
stdTWS = etree.Element("stdTWS", xmlns="FEL")
TrnEstNum = etree.SubElement(stdTWS, "TrnEstNum")
TrnEstNum.text = factura.journal_id.codigo_establecimiento_fel
TipTrnCod = etree.SubElement(stdTWS, "TipTrnCod")
TipTrnCod.text = factura.journal_id.tipo_documento_fel
TrnNum = etree.SubElement(stdTWS, "TrnNum")
TrnNum.text = str(factura.id)
TrnFec = etree.SubElement(stdTWS, "TrnFec")
TrnFec.text = str(factura.date_invoice)
MonCod = etree.SubElement(stdTWS, "MonCod")
MonCod.text = "GTQ"
TrnBenConNIT = etree.SubElement(stdTWS, "TrnBenConNIT")
                TrnBenConNIT.text = (factura.partner_id.vat or '').replace('-', '')  # tolerate a missing NIT
TrnFec = etree.SubElement(stdTWS, "TrnExp")
TrnFec.text = "1" if factura.tipo_gasto == "importacion" else "0"
TrnFec = etree.SubElement(stdTWS, "TrnExento")
TrnFec.text = "0"
TrnFec = etree.SubElement(stdTWS, "TrnFraseTipo")
TrnFec.text = "0"
TrnFec = etree.SubElement(stdTWS, "TrnEscCod")
TrnFec.text = "1" if factura.tipo_gasto == "importacion" else "0"
TrnEFACECliCod = etree.SubElement(stdTWS, "TrnEFACECliCod")
TrnEFACECliNom = etree.SubElement(stdTWS, "TrnEFACECliNom")
TrnEFACECliNom.text = factura.partner_id.name
TrnEFACECliDir = etree.SubElement(stdTWS, "TrnEFACECliDir")
TrnEFACECliDir.text = factura.partner_id.street or ""
TrnObs = etree.SubElement(stdTWS, "TrnObs")
TrnObs.text = factura.comment or ""
TrnEMail = etree.SubElement(stdTWS, "TrnEmail")
if factura.partner_id.email:
TrnEMail.text = factura.partner_id.email
TrnCampAd01 = etree.SubElement(stdTWS, "TrnCampAd01")
TrnCampAd01.text = eval(factura.company_id.trncampad01_fel) if factura.company_id.trncampad01_fel else ""
TrnCampAd02 = etree.SubElement(stdTWS, "TrnCampAd02")
TrnCampAd02.text = eval(factura.company_id.trncampad02_fel) if factura.company_id.trncampad02_fel else ""
TrnCampAd03 = etree.SubElement(stdTWS, "TrnCampAd03")
TrnCampAd03.text = eval(factura.company_id.trncampad03_fel) if factura.company_id.trncampad03_fel else ""
TrnCampAd04 = etree.SubElement(stdTWS, "TrnCampAd04")
TrnCampAd04.text = eval(factura.company_id.trncampad04_fel) if factura.company_id.trncampad04_fel else ""
TrnCampAd05 = etree.SubElement(stdTWS, "TrnCampAd05")
TrnCampAd05.text = eval(factura.company_id.trncampad05_fel) if factura.company_id.trncampad05_fel else ""
TrnCampAd06 = etree.SubElement(stdTWS, "TrnCampAd06")
TrnCampAd06.text = eval(factura.company_id.trncampad06_fel) if factura.company_id.trncampad06_fel else ""
TrnCampAd07 = etree.SubElement(stdTWS, "TrnCampAd07")
TrnCampAd08 = etree.SubElement(stdTWS, "TrnCampAd08")
TrnCampAd09 = etree.SubElement(stdTWS, "TrnCampAd09")
TrnCampAd10 = etree.SubElement(stdTWS, "TrnCampAd10")
TrnCampAd11 = etree.SubElement(stdTWS, "TrnCampAd11")
TrnCampAd12 = etree.SubElement(stdTWS, "TrnCampAd12")
TrnCampAd13 = etree.SubElement(stdTWS, "TrnCampAd13")
TrnCampAd14 = etree.SubElement(stdTWS, "TrnCampAd14")
TrnCampAd15 = etree.SubElement(stdTWS, "TrnCampAd15")
TrnCampAd16 = etree.SubElement(stdTWS, "TrnCampAd16")
TrnCampAd17 = etree.SubElement(stdTWS, "TrnCampAd17")
TrnCampAd18 = etree.SubElement(stdTWS, "TrnCampAd18")
TrnCampAd19 = etree.SubElement(stdTWS, "TrnCampAd19")
TrnCampAd20 = etree.SubElement(stdTWS, "TrnCampAd20")
TrnCampAd21 = etree.SubElement(stdTWS, "TrnCampAd21")
TrnCampAd22 = etree.SubElement(stdTWS, "TrnCampAd22")
TrnCampAd23 = etree.SubElement(stdTWS, "TrnCampAd23")
TrnCampAd24 = etree.SubElement(stdTWS, "TrnCampAd24")
TrnCampAd25 = etree.SubElement(stdTWS, "TrnCampAd25")
TrnCampAd26 = etree.SubElement(stdTWS, "TrnCampAd26")
TrnCampAd27 = etree.SubElement(stdTWS, "TrnCampAd27")
TrnCampAd28 = etree.SubElement(stdTWS, "TrnCampAd28")
TrnCampAd29 = etree.SubElement(stdTWS, "TrnCampAd29")
TrnCampAd30 = etree.SubElement(stdTWS, "TrnCampAd30")
stdTWSD = etree.SubElement(stdTWS, "stdTWSD")
num = 1
for linea in factura.invoice_line:
stdTWSDIt = etree.SubElement(stdTWSD, "stdTWS.stdTWSCIt.stdTWSDIt")
TrnLiNum = etree.SubElement(stdTWSDIt, "TrnLiNum")
TrnLiNum.text = str(num)
num += 1
TrnArtCod = etree.SubElement(stdTWSDIt, "TrnArtCod")
if linea.product_id.default_code:
TrnArtCod.text = linea.product_id.default_code
else:
TrnArtCod.text = str(linea.product_id.id)
TrnArtNom = etree.SubElement(stdTWSDIt, "TrnArtNom")
TrnArtNom.text = linea.name
TrnCan = etree.SubElement(stdTWSDIt, "TrnCan")
TrnCan.text = str(linea.quantity)
TrnVUn = etree.SubElement(stdTWSDIt, "TrnVUn")
TrnVUn.text = str(linea.price_unit)
TrnUniMed = etree.SubElement(stdTWSDIt, "TrnUniMed")
TrnUniMed.text = "UNIDAD"
TrnVDes = etree.SubElement(stdTWSDIt, "TrnVDes")
TrnVDes.text = str(( linea.price_unit * linea.quantity ) * ( linea.discount / 100 ) )
TrnArtBienSer = etree.SubElement(stdTWSDIt, "TrnArtBienSer")
if linea.product_id.type == 'product':
TrnArtBienSer.text = "B"
else:
TrnArtBienSer.text = "S"
TrnArtImpAdiCod = etree.SubElement(stdTWSDIt, "TrnArtImpAdiCod")
TrnArtImpAdiCod.text = "0"
TrnArtImpAdiUniGrav = etree.SubElement(stdTWSDIt, "TrnArtImpAdiUniGrav")
TrnArtImpAdiUniGrav.text = "0"
TrnDetCampAd01 = etree.SubElement(stdTWSDIt, "TrnDetCampAdi01")
TrnDetCampAd02 = etree.SubElement(stdTWSDIt, "TrnDetCampAdi02")
TrnDetCampAd03 = etree.SubElement(stdTWSDIt, "TrnDetCampAdi03")
TrnDetCampAd04 = etree.SubElement(stdTWSDIt, "TrnDetCampAdi04")
TrnDetCampAd05 = etree.SubElement(stdTWSDIt, "TrnDetCampAdi05")
if factura.journal_id.tipo_documento_fel == "FCAM":
stdTWSCam = etree.SubElement(stdTWS, "stdTWSCam")
stdTWSCamIt = etree.SubElement(stdTWSCam, "stdTWS.stdTWSCam.stdTWSCamIt")
TrnAbonoNum = etree.SubElement(stdTWSCamIt, "TrnAbonoNum")
TrnAbonoNum.text = "1"
TrnAbonoFecVen = etree.SubElement(stdTWSCamIt, "TrnAbonoFecVen")
TrnAbonoFecVen.text = str(factura.date_due)
TrnAbonoMonto = etree.SubElement(stdTWSCamIt, "TrnAbonoMonto")
TrnAbonoMonto.text = str(factura.amount_total)
if factura.journal_id.tipo_documento_fel in ["NCRE", "NDEB"]:
stdTWSCam = etree.SubElement(stdTWS, "stdTWSNota")
stdTWSCamIt = etree.SubElement(stdTWSCam, "stdTWS.stdTWSNota.stdTWSNotaIt")
TDFEPRegimenAntiguo = etree.SubElement(stdTWSCamIt, "TDFEPRegimenAntiguo")
TDFEPRegimenAntiguo.text = "0"
TDFEPAutorizacion = etree.SubElement(stdTWSCamIt, "TDFEPAutorizacion")
TDFEPAutorizacion.text = factura.factura_original_id.firma_fel if factura.factura_original_id else ""
TDFEPSerie = etree.SubElement(stdTWSCamIt, "TDFEPSerie")
TDFEPSerie.text = factura.factura_original_id.serie_fel if factura.factura_original_id else ""
TDFEPNumero = etree.SubElement(stdTWSCamIt, "TDFEPNumero")
TDFEPNumero.text = factura.factura_original_id.numero_fel if factura.factura_original_id else ""
TDFEPNumero = etree.SubElement(stdTWSCamIt, "TDFEPFecEmision")
TDFEPNumero.text = factura.factura_original_id.date_invoice if factura.factura_original_id else ""
xmls = etree.tostring(stdTWS, xml_declaration=True, encoding="UTF-8")
logging.warn(xmls.decode('utf8'))
wsdl = "https://www.facturaenlineagt.com/adocumento?wsdl"
if factura.company_id.pruebas_fel:
wsdl = "http://pruebas.ecofactura.com.gt:8080/fel/adocumento?wsdl"
client = zeep.Client(wsdl=wsdl)
resultado = client.service.Execute(factura.company_id.vat, factura.company_id.usuario_fel, factura.company_id.clave_fel, factura.company_id.vat, xmls)
logging.warn(resultado)
resultadoBytes = bytes(bytearray(resultado, encoding='utf-8'))
resultadoXML = etree.XML(resultadoBytes)
if resultadoXML.xpath("/DTE"):
dte = resultadoXML.xpath("/DTE")
factura.firma_fel = dte[0].get("NumeroAutorizacion")
factura.serie_fel = dte[0].get("Serie")
factura.numero_fel = dte[0].get("Numero")
factura.documento_xml_fel = base64.b64encode(xmls)
factura.pdf_fel = resultadoXML.xpath("/DTE/Pdf")[0].text
factura.resultado_xml_fel = resultadoXML.xpath("/DTE/Xml")[0].text
else:
raise Warning(resultado)
return super(AccountInvoice, self).invoice_validate()
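    # Illustrative summary (comment only, nothing new from the source): the
    # method above serializes the invoice into the provider's <stdTWS> XML
    # envelope, posts it with the zeep SOAP client, and stores the returned
    # <DTE> authorization (firma/serie/numero), PDF and XML on the invoice.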
@api.multi
def action_cancel(self):
cancel_resultado = super(AccountInvoice, self).action_cancel()
if cancel_resultado:
for factura in self:
if factura.journal_id.generar_fel and factura.firma_fel:
wsdl = "https://www.facturaenlineagt.com/aanulacion?wsdl"
if factura.company_id.pruebas_fel:
wsdl = "http://pruebas.ecofactura.com.gt:8080/fel/aanulacion?wsdl"
client = zeep.Client(wsdl=wsdl)
resultado = client.service.Execute(factura.company_id.vat, factura.company_id.usuario_fel, factura.company_id.clave_fel, factura.company_id.vat, factura.firma_fel, factura.comment)
logging.warn(resultado)
resultadoBytes = bytes(bytearray(resultado, encoding='utf-8'))
resultadoXML = etree.XML(resultadoBytes)
if not resultadoXML.xpath("/DTE"):
raise Warning(resultado)
return cancel_resultado
@api.multi
def action_cancel_draft(self):
for factura in self:
if factura.journal_id.generar_fel and factura.firma_fel:
raise Warning("La factura ya fue enviada, por lo que ya no puede ser modificada")
else:
return super(AccountInvoice, self).action_cancel_draft()
class AccountJournal(models.Model):
_inherit = "account.journal"
generar_fel = fields.Boolean('Generar FEL')
codigo_establecimiento_fel = fields.Char('Numero Establecimiento FEL')
tipo_documento_fel = fields.Selection([('FACT', 'FACT'), ('FCAM', 'FCAM'), ('FPEQ', 'FPEQ'), ('FCAP', 'FCAP'), ('FESP', 'FESP'), ('NABN', 'NABN'), ('RDON', 'RDON'), ('RECI', 'RECI'), ('NDEB', 'NDEB'), ('NCRE', 'NCRE')], 'Tipo de Documento FEL',)
class ResCompany(models.Model):
_inherit = "res.company"
usuario_fel = fields.Char('Usuario FEL')
clave_fel = fields.Char('Clave FEL')
pruebas_fel = fields.Boolean('Pruebas FEL')
trncampad01_fel = fields.Char(string="TrnCampAd01 FEL")
trncampad02_fel = fields.Char(string="TrnCampAd02 FEL")
trncampad03_fel = fields.Char(string="TrnCampAd03 FEL")
trncampad04_fel = fields.Char(string="TrnCampAd04 FEL")
trncampad05_fel = fields.Char(string="TrnCampAd05 FEL")
    trncampad06_fel = fields.Char(string="TrnCampAd06 FEL")
 | 59.252066 | 249 | 0.610015 |
4a1fe4dcab60dcd91c85a1b5ae83ce7af99fad3c | 5,695 | py | Python | sgx/utils/logging.py | squillero/sgx-devel | 6227364a2cfef9680826daa103d9d9de50487164 | [
"Apache-2.0"
] | null | null | null | sgx/utils/logging.py | squillero/sgx-devel | 6227364a2cfef9680826daa103d9d9de50487164 | [
"Apache-2.0"
] | 4 | 2020-11-21T09:41:50.000Z | 2021-04-22T09:37:41.000Z | sgx/utils/logging.py | squillero/sgx-devel | 6227364a2cfef9680826daa103d9d9de50487164 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#############################################################################
# _________ ____________ ___ #
# / _____// _____/\ \/ / ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# \_____ \/ \ ___ \ / THE E(X)TENDED (S)ELFISH (G)ENE ALGORITHM #
# / \ \_\ \/ \ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# /_________/\________/___/\ \ https://github.com/squillero/sgx #
# \_/ #
# #
# A quick 'n dirty versatile population-less evolutionary optimizer loosely #
# inspired by a cool interpretation of the Darwinian theory. #
# #
#############################################################################
# Copyright 2021 Giovanni Squillero
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import warnings
from .cpu_time import microgp4_process_time
DefaultLogger = logging.getLogger('sgx')
# Propagate standard log levels, and add a few custom ones
SPAM = logging.DEBUG - 1
DEBUG = logging.DEBUG
VERBOSE = logging.INFO - 1
INFO = logging.INFO
BARE = logging.INFO + 1
CRITICAL = logging.CRITICAL
WARNING = logging.WARNING
ERROR = logging.ERROR
# Setup DefaultLogger's handler
DefaultLogger.handler = list()
DefaultLogger.setLevel(logging.INFO)
DefaultLogger.__doc__ = "Default MicroGP4 logger"
def log_cpu(level: int = INFO, msg: str = "", *args, **kwargs) -> None:
"""Like log(), but including cpu time."""
if msg:
DefaultLogger.log(level, "%s: %s", msg, microgp4_process_time())
else:
DefaultLogger.log(level, "%s", msg, microgp4_process_time())
def log_split(level: int, msg: str) -> None:
for line in msg.split("\n"):
DefaultLogger.log(level, line)
DefaultLogger.log_cpu = log_cpu
DefaultLogger.log_split = log_split
# Log SPAM, VERBOSE & BARE
DefaultLogger.spam = lambda *args, **kwargs: DefaultLogger.log(SPAM, *args, **kwargs)
DefaultLogger.verbose = lambda *args, **kwargs: DefaultLogger.log(VERBOSE, *args, **kwargs)
DefaultLogger.bare = lambda *args, **kwargs: DefaultLogger.log(BARE, *args, **kwargs)
# shortcuts
spam = DefaultLogger.spam
debug = DefaultLogger.debug
verbose = DefaultLogger.verbose
info = DefaultLogger.info
warning = DefaultLogger.warning
error = DefaultLogger.error
critical = DefaultLogger.critical
bare = DefaultLogger.bare
set_level = lambda *args, **kwargs: DefaultLogger.setLevel(*args, **kwargs)
try:
import coloredlogs
logging.addLevelName(SPAM, ' SPAM')
logging.addLevelName(DEBUG, ' DEBUG')
logging.addLevelName(VERBOSE, ' VERBOSE')
logging.addLevelName(INFO, ' INFO')
logging.addLevelName(BARE, '')
logging.addLevelName(WARNING, ' WARNING')
logging.addLevelName(ERROR, ' ERROR')
logging.addLevelName(CRITICAL, ' CRITICAL')
coloredlogs.install(level='INFO',
logger=DefaultLogger,
fmt='%(asctime)s%(levelname)s %(message)s',
datefmt='%H:%M:%S',
field_styles={
'asctime': {
'color': 'cyan'
},
'levelname': {
'color': 'blue',
'bold': False,
'bright': True
}
},
level_styles={
logging.getLevelName(ERROR): {
'color': 'red',
'bold': True
},
logging.getLevelName(CRITICAL): {
'color': 'red',
'bold': True
},
logging.getLevelName(WARNING): {
'color': 'yellow',
'bold': True
},
logging.getLevelName(DEBUG): {
'color': 'black',
'bright': True
},
logging.getLevelName(SPAM): {
'color': 'black',
'bright': True
}
})
except Exception:
WARN_NOCOLOR = "Colored log not available (install module 'coloredlogs')."
warnings.warn(WARN_NOCOLOR, RuntimeWarning)
logging.addLevelName(SPAM, ' [SPAM]')
logging.addLevelName(DEBUG, ' [DEBUG]')
logging.addLevelName(VERBOSE, ' [VERBOSE]')
logging.addLevelName(INFO, ' [INFO]')
logging.addLevelName(BARE, '')
logging.addLevelName(WARNING, ' [WARNING]')
logging.addLevelName(ERROR, ' [ERROR]')
logging.addLevelName(CRITICAL, ' [CRITICAL]')
logging.basicConfig(level='DEBUG', format='%(asctime)s%(levelname)s %(message)s', datefmt='%H:%M:%S')
| 40.105634 | 105 | 0.509043 |
4a1fe5b0e230ed7f071fd1692d7d5ea85b09e2d1 | 3,565 | py | Python | tests/unit/test_model.py | r-peschke/openslides-backend | 83d0dab68bb914f06a0f50cffe23fc10ca45376f | [
"MIT"
] | null | null | null | tests/unit/test_model.py | r-peschke/openslides-backend | 83d0dab68bb914f06a0f50cffe23fc10ca45376f | [
"MIT"
] | null | null | null | tests/unit/test_model.py | r-peschke/openslides-backend | 83d0dab68bb914f06a0f50cffe23fc10ca45376f | [
"MIT"
] | null | null | null | from typing import cast
from unittest import TestCase
from openslides_backend.models import fields
from openslides_backend.models.base import Model
from openslides_backend.shared.patterns import Collection
class FakeModel(Model):
"""
Fake Model for testing purposes.
"""
collection = Collection("fake_model")
verbose_name = "fake_model"
id = fields.IntegerField(required=True)
text = fields.CharField(
required=True, constraints={"description": "The text of this fake model."}
)
fake_model_2_ids = fields.RelationListField(
to=Collection("fake_model_2"), related_name="relation_field"
)
fake_model_2_generic_ids = fields.GenericRelationListField(
to=[Collection("fake_model_2")], related_name="generic_relation_field"
)
class FakeModel2(Model):
"""
Fake model for testing purposes. With relation field.
"""
collection = Collection("fake_model_2")
verbose_name = "fake_model_2"
id = fields.IntegerField(required=True)
relation_field = fields.RelationField(
to=Collection("fake_model"),
related_name="fake_model_2_ids",
)
generic_relation_field = fields.RelationField(
to=Collection("fake_model"),
related_name="fake_model_2_generic_ids",
generic_relation=True,
)
class ModelBaseTester(TestCase):
"""
Tests methods of base Action class and also some helper functions.
"""
def test_get_properties(self) -> None:
expected = {
"id": {"type": "integer"},
"text": {
"description": "The text of this fake model.",
"type": "string",
"minLength": 1,
"maxLength": 256,
},
}
self.assertEqual(FakeModel().get_properties("id", "text"), expected)
def test_get_properties_invalid(self) -> None:
with self.assertRaises(ValueError) as context_manager:
FakeModel().get_properties("unknown_property")
self.assertEqual(
context_manager.exception.args[0],
"unknown_property is not a field of fake_model",
)
def test_get_fields_fake_model(self) -> None:
self.assertEqual(
["fake_model_2_generic_ids", "fake_model_2_ids", "id", "text"],
[field.own_field_name for field in FakeModel().get_fields()],
)
def test_own_collection_attr(self) -> None:
rels = [
FakeModel().get_field("fake_model_2_ids"),
FakeModel().get_field("fake_model_2_generic_ids"),
]
field = cast(fields.BaseRelationField, rels[0])
self.assertEqual(str(field.own_collection), "fake_model")
field = cast(fields.BaseRelationField, rels[1])
self.assertEqual(str(field.own_collection), "fake_model")
def test_get_field_unknown_field(self) -> None:
with self.assertRaises(ValueError):
FakeModel().get_field("Unknown field")
def test_structured_relation_init(self) -> None:
with self.assertRaises(ValueError):
fields.RelationField(
to=Collection("fake_model_tahheque7O"),
related_name="invalid_related_name",
structured_relation=["invalid_structured_relation"],
)
def test_structured_relation_init_2(self) -> None:
with self.assertRaises(ValueError):
fields.RelationField(
to=Collection("fake_model_tahheque7O"),
related_name="invalid_related_name_with_$",
)
| 33.009259 | 82 | 0.643198 |
4a1fe6d73b925e34a7e9e2f5737baea23f2ab282 | 5,461 | py | Python | py2/testdir_single_jvm/test_uncompleted_frame.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2 | 2018-09-20T03:28:46.000Z | 2018-12-06T21:39:29.000Z | py2/testdir_single_jvm/test_uncompleted_frame.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2 | 2021-06-02T02:24:03.000Z | 2021-11-15T17:51:49.000Z | py2/testdir_single_jvm/test_uncompleted_frame.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 1 | 2020-04-17T13:06:26.000Z | 2020-04-17T13:06:26.000Z | import unittest, sys
sys.path.extend(['.','..','../..','py'])
import string
print "This test looks more complicated than it is, only because it's a parameterized version of something else"
print "look at sandbox/commands.log for the sequence to h2o (pretty simple)"
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_browse as h2b
from h2o_test import find_file, dump_json, verboseprint
expectedZeros = [0, 4914, 656, 24603, 38665, 124, 13, 5, 1338, 51, 320216, 551128, 327648, 544044, 577981,
573487, 576189, 568616, 579415, 574437, 580907, 580833, 579865, 548378, 568602, 551041,
563581, 580413, 581009, 578167, 577590, 579113, 576991, 571753, 580174, 547639, 523260,
559734, 580538, 578423, 579926, 580066, 465765, 550842, 555346, 528493, 535858, 579401,
579121, 580893, 580714, 565439, 567206, 572262, 0]
DO_INTERMEDIATE_RESULTS = True
DO_2X_SRC = False
DO_TEST_BAD_COLNAME = False
DO_TEST_BAD_COL_LENGTH = False
DO_IMPORT_PARSE = True
SINGLE_CSVFILENAME = 'covtype.data'
def assertEqualMsg(a, b): assert a == b, "%s %s" % (a, b)
def parseKeyIndexedCheck(frames_result, multiplyExpected, expectedColumnNames):
# get the name of the frame?
print ""
frame = frames_result['frames'][0]
rows = frame['rows']
columns = frame['columns']
for i,c in enumerate(columns):
label = c['label']
stype = c['type']
missing = c['missing_count']
zeros = c['zero_count']
domain = c['domain']
print "column: %s label: %s type: %s missing: %s zeros: %s domain: %s" %\
(i,label,stype,missing,zeros,domain)
# files are concats of covtype. so multiply expected
# assertEqualMsg(zeros, expectedZeros[i] * multiplyExpected)
assertEqualMsg(label, expectedColumnNames[i])
assertEqualMsg(stype,"int")
# assertEqualMsg(missing, 0)
assertEqualMsg(domain, None)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_uncompleted_frame(self):
tryList = [
(['covtype.data', 'covtype.shuffled.data', 'covtype.sorted.data'], 3, 30),
]
for (csvFilenameList, multiplyExpected, timeoutSecs) in tryList:
            # h2o-dev doesn't take ../.. type paths? make find_file return absolute path
a_node = h2o.nodes[0]
# import_result = a_node.import_files(path=find_file("smalldata/logreg/prostate.csv"))
importFolderPath = "/home/0xdiag/datasets/standard"
# keep a list of the keys you import, to feed to parse
kList = []
for csvFilename in csvFilenameList:
csvPathname = importFolderPath + "/" + csvFilename
if not DO_IMPORT_PARSE:
import_result = a_node.import_files(path=csvPathname)
k = import_result['keys'][0]
frames_result = a_node.frames(key=k, row_count=5, timeoutSecs=timeoutSecs)
kList.append(k)
# print "frames_result from the first import_result key", dump_json(frames_result)
print "I think I imported these keys:", kList
# what happens if I put the kList in twice? can it touch the same source file without lock issues?
if DO_2X_SRC:
kList2 = kList + kList
multiplyExpected = 2 * multiplyExpected
else:
kList2 = kList
# try passing column names also.
# questions to try
# what if you pass missing (,,)
# what if you pass too many, too few, or some with same name?
# let's try all the characters
basename = string.printable
# remove the ',' in the string (remember strings are immutable..can't use .replace to remove
# other characters are illegal? [] '
if DO_TEST_BAD_COLNAME:
basename = basename.translate(None, ",[]!#$%&'()*+-./:;<=>?@\^_`{|}~" + '"')
else:
basename = "C"
colLength = 1 if DO_TEST_BAD_COL_LENGTH else 55
expectedColumnNames = map(lambda x: basename + "_" + str(x+1), range(colLength))
column_names = '[' + ','.join(map((lambda x: '"' + x + '"'), expectedColumnNames)) + ']'
kwargs = {
'column_names': column_names,
'intermediateResults': DO_INTERMEDIATE_RESULTS,
}
print kwargs
if DO_IMPORT_PARSE:
multiplyExpected = 1
csvPathname = importFolderPath + "/" + SINGLE_CSVFILENAME
parse_result = h2i.import_parse(path=csvPathname, timeoutSecs=timeoutSecs, **kwargs)
else:
parse_result = a_node.parse(key=kList2, timeoutSecs=timeoutSecs, **kwargs)
k = parse_result['frames'][0]['frame_id']['name']
# print "parse_result:", dump_json(parse_result)
frames_result = a_node.frames(key=k, row_count=5)
# print "frames_result from the first parse_result key", dump_json(frames_result)
# we doubled the keyList, from what was in tryList
parseKeyIndexedCheck(frames_result, multiplyExpected, expectedColumnNames)
if __name__ == '__main__':
h2o.unit_main()
| 39.572464 | 112 | 0.614539 |
4a1fe6f7900dfa3b351aff7e3109dedb44c900bc | 5,966 | py | Python | heat/tests/api/middleware/test_version_negotiation_middleware.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 265 | 2015-01-02T09:33:22.000Z | 2022-03-26T23:19:54.000Z | heat/tests/api/middleware/test_version_negotiation_middleware.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 8 | 2015-09-01T15:43:19.000Z | 2021-12-14T05:18:23.000Z | heat/tests/api/middleware/test_version_negotiation_middleware.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 295 | 2015-01-06T07:00:40.000Z | 2021-09-06T08:05:06.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from heat.api.middleware import version_negotiation as vn
from heat.tests import common
class VersionController(object):
pass
class VersionNegotiationMiddlewareTest(common.HeatTestCase):
def _version_controller_factory(self, conf):
return VersionController()
def test_match_version_string(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({})
major_version = 1
minor_version = 0
match = version_negotiation._match_version_string(
'v{0}.{1}'.format(major_version, minor_version), request)
self.assertTrue(match)
self.assertEqual(major_version, request.environ['api.major_version'])
self.assertEqual(minor_version, request.environ['api.minor_version'])
def test_not_match_version_string(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({})
match = version_negotiation._match_version_string("invalid", request)
self.assertFalse(match)
def test_return_version_controller_when_request_path_is_version(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({'PATH_INFO': 'versions'})
response = version_negotiation.process_request(request)
self.assertIsInstance(response, VersionController)
def test_return_version_controller_when_request_path_is_empty(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({'PATH_INFO': '/'})
response = version_negotiation.process_request(request)
self.assertIsInstance(response, VersionController)
def test_request_path_contains_valid_version(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
major_version = 1
minor_version = 0
request = webob.Request({'PATH_INFO':
'v{0}.{1}/resource'.format(major_version,
minor_version)})
response = version_negotiation.process_request(request)
self.assertIsNone(response)
self.assertEqual(major_version, request.environ['api.major_version'])
self.assertEqual(minor_version, request.environ['api.minor_version'])
def test_removes_version_from_request_path(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
expected_path = 'resource'
request = webob.Request({'PATH_INFO': 'v1.0/{0}'.format(expected_path)
})
response = version_negotiation.process_request(request)
self.assertIsNone(response)
self.assertEqual(expected_path, request.path_info_peek())
def test_request_path_contains_unknown_version(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({'PATH_INFO': 'v2.0/resource'})
response = version_negotiation.process_request(request)
self.assertIsInstance(response, VersionController)
def test_accept_header_contains_valid_version(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
major_version = 1
minor_version = 0
request = webob.Request({'PATH_INFO': 'resource'})
request.headers['Accept'] = (
'application/vnd.openstack.orchestration-v{0}.{1}'.format(
major_version, minor_version))
response = version_negotiation.process_request(request)
self.assertIsNone(response)
self.assertEqual(major_version, request.environ['api.major_version'])
self.assertEqual(minor_version, request.environ['api.minor_version'])
def test_accept_header_contains_unknown_version(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({'PATH_INFO': 'resource'})
request.headers['Accept'] = (
'application/vnd.openstack.orchestration-v2.0')
response = version_negotiation.process_request(request)
self.assertIsInstance(response, VersionController)
def test_no_URI_version_accept_header_contains_invalid_MIME_type(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({'PATH_INFO': 'resource'})
request.headers['Accept'] = 'application/invalidMIMEType'
response = version_negotiation.process_request(request)
self.assertIsInstance(response, webob.exc.HTTPNotFound)
def test_invalid_utf8_path(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request.blank('/%c0')
response = version_negotiation.process_request(request)
self.assertIsInstance(response, webob.exc.HTTPBadRequest)
| 40.310811 | 78 | 0.700302 |
4a1fe788af068733f2a1306019f96737aa2e1644 | 446 | py | Python | files/rtorrent.py | rullmann/bundlewrap-telegraf | ab08f3b0f4a534821ba3dbcb14ce2f72ca708153 | [
"MIT"
] | null | null | null | files/rtorrent.py | rullmann/bundlewrap-telegraf | ab08f3b0f4a534821ba3dbcb14ce2f72ca708153 | [
"MIT"
] | null | null | null | files/rtorrent.py | rullmann/bundlewrap-telegraf | ab08f3b0f4a534821ba3dbcb14ce2f72ca708153 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import xmlrpc.client
import json
rtorrent = xmlrpc.client.ServerProxy('${node.metadata['telegraf']['rtorrent']['url']}')
data = {}
for datapoint in ['get_memory_usage', 'get_max_memory_usage', 'get_upload_rate', 'get_up_rate', 'get_download_rate', 'get_down_rate', 'get_up_total', 'get_down_total']:
function = getattr(rtorrent, datapoint)
data[datapoint] = function()
json_data = json.dumps(data)
print(json_data)
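# Illustrative note (an assumption from the repo name, not stated in this
# file): the single JSON object printed above, e.g. {"get_up_rate": 1024, ...},
# is the shape consumed by a telegraf exec input with data_format = "json".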
| 29.733333 | 168 | 0.735426 |
4a1fe848a1a7f7f07f72726e7866d0e93692cf63 | 610 | py | Python | leetcode/logger_rate_limiter.py | sci-c0/python-misc-problems | a0827cc9cd290ca142bba3b7dda307234da63c3c | [
"BSD-3-Clause"
] | null | null | null | leetcode/logger_rate_limiter.py | sci-c0/python-misc-problems | a0827cc9cd290ca142bba3b7dda307234da63c3c | [
"BSD-3-Clause"
] | null | null | null | leetcode/logger_rate_limiter.py | sci-c0/python-misc-problems | a0827cc9cd290ca142bba3b7dda307234da63c3c | [
"BSD-3-Clause"
] | null | null | null | """
https://leetcode.com/problems/logger-rate-limiter/
Tags: Google; Easy
"""
class Logger:
def __init__(self):
self.last_printed = {}
def shouldPrintMessage(self, timestamp: int, message: str) -> bool:
        if message not in self.last_printed or self.last_printed[message] + 10 <= timestamp:
self.last_printed[message] = timestamp
return True
return False
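    # Illustrative trace (not from the source): on a fresh Logger,
    # shouldPrintMessage(1, "foo") -> True, shouldPrintMessage(3, "foo") -> False,
    # shouldPrintMessage(11, "foo") -> True again, since 1 + 10 <= 11.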
# Your Logger object will be instantiated and called as such:
# obj = Logger()
# param_1 = obj.shouldPrintMessage(timestamp,message)
| 25.416667 | 95 | 0.65082 |
4a1fe8fb84858b19adb303e0fcc82e2f1e8f957f | 116 | py | Python | frappe/query_builder/__init__.py | mokhito/frappe | ebc29efae568a91e52237c134e031f77185fe36d | [
"MIT"
] | 1 | 2020-08-26T22:19:30.000Z | 2020-08-26T22:19:30.000Z | frappe/query_builder/__init__.py | mokhito/frappe | ebc29efae568a91e52237c134e031f77185fe36d | [
"MIT"
] | 1 | 2021-05-11T23:42:56.000Z | 2021-05-11T23:42:56.000Z | frappe/query_builder/__init__.py | mokhito/frappe | ebc29efae568a91e52237c134e031f77185fe36d | [
"MIT"
] | 2 | 2021-08-24T00:57:20.000Z | 2021-08-24T01:30:40.000Z | from pypika import *
from frappe.query_builder.utils import Column, DocType, get_query_builder, patch_query_execute
| 38.666667 | 94 | 0.853448 |
4a1fe9fe18fd1b5ec6fa6bde1c7c5de70cdf833a | 1,711 | py | Python | acct_mgr/opt/tests/__init__.py | t-kenji/trac-account-manager-plugin | 5c89401b4deea1cabc2fa10dc1c43964a3dd26da | [
"Beerware"
] | null | null | null | acct_mgr/opt/tests/__init__.py | t-kenji/trac-account-manager-plugin | 5c89401b4deea1cabc2fa10dc1c43964a3dd26da | [
"Beerware"
] | null | null | null | acct_mgr/opt/tests/__init__.py | t-kenji/trac-account-manager-plugin | 5c89401b4deea1cabc2fa10dc1c43964a3dd26da | [
"Beerware"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013-2015 Steffen Hoffmann
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import unittest
def test_suite():
msg_fail = 'Issue with %s (%s): skipping acct_mgr.opt.tests.%s'
suite = unittest.TestSuite()
try:
import acct_mgr.opt.tests.announcer
except ImportError, e:
print(msg_fail % ('UID changer for TracAnnouncer', e, 'announcer'))
else:
suite.addTest(acct_mgr.opt.tests.announcer.test_suite())
try:
import acct_mgr.opt.tests.tracforms
except ImportError, e:
print(msg_fail % ('UID changer for TracForms', e, 'tracforms'))
else:
suite.addTest(acct_mgr.opt.tests.tracforms.test_suite())
try:
import acct_mgr.opt.tests.tracscreenshots
except ImportError, e:
print(msg_fail % ('UID changer for TracScreenshots', e,
'tracscreenshots'))
else:
suite.addTest(acct_mgr.opt.tests.tracscreenshots.test_suite())
try:
import acct_mgr.opt.tests.tracvote
except ImportError, e:
print(msg_fail % ('UID changer for TracVote', e, 'tracvote'))
else:
suite.addTest(acct_mgr.opt.tests.tracvote.test_suite())
try:
import acct_mgr.opt.tests.radius
except ImportError, e:
print(msg_fail % ('RADIUS auth', e, 'radius'))
else:
suite.addTest(acct_mgr.opt.tests.radius.test_suite())
return suite
# Start test suite directly from command line like so:
# $> PYTHONPATH=$PWD python acct_mgr/opt/tests/__init__.py
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 29 | 75 | 0.659848 |
4a1fea403583e70b2453dfd737b720bb6eb98110 | 1,865 | py | Python | setup.py | timgates42/python-sanitize | 01fbdccffb194f7969d4a6ec0da0007bc8cc6124 | [
"BSD-2-Clause"
] | 65 | 2015-01-31T07:23:16.000Z | 2020-08-02T05:32:38.000Z | setup.py | timgates42/python-sanitize | 01fbdccffb194f7969d4a6ec0da0007bc8cc6124 | [
"BSD-2-Clause"
] | null | null | null | setup.py | timgates42/python-sanitize | 01fbdccffb194f7969d4a6ec0da0007bc8cc6124 | [
"BSD-2-Clause"
] | 8 | 2016-02-19T08:32:40.000Z | 2021-12-25T11:44:59.000Z | import unittest
import sys
from setuptools import setup, find_packages, Command
PKG_NAME = 'sanitize'
class RunTests(Command):
"""
New setup.py command to run all tests for the package.
"""
description = "run all tests for {0}".format(PKG_NAME)
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
tests = unittest.TestLoader().discover('.')
runner = unittest.TextTestRunner()
results = runner.run(tests)
sys.exit(not results.wasSuccessful())
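# Illustrative usage (an assumption, matching the cmdclass registration below):
#   python setup.py test
# discovers every unittest case under the project root and exits non-zero on failure.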
setup(
name=PKG_NAME,
version=".".join(map(str, __import__(PKG_NAME).__version__)),
description="Bringing sanitiy to world of messed-up data",
long_description=open('README.md').read(),
author="Aaron Swartz",
author_email="[email protected]",
maintainer='Alireza Savand',
maintainer_email='[email protected]',
url='http://www.aaronsw.com/2002/sanitize/',
cmdclass={'test': RunTests},
license=open('LICENSE').read(),
packages=find_packages(exclude=['tests']),
py_modules=[PKG_NAME],
include_package_data=True,
zip_safe=False,
install_requires=open('requirements.txt').read().splitlines(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.3',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| 29.603175 | 71 | 0.636997 |
4a1feb4e54795de25a4348ae7be6d11b52f5f344 | 3,807 | py | Python | panamap_proto/panamap_proto.py | kirillsulim/panamap-proto | 430bfbba022300b7166351682d99f38b097719c4 | [
"MIT"
] | 1 | 2022-01-20T06:49:23.000Z | 2022-01-20T06:49:23.000Z | panamap_proto/panamap_proto.py | kirillsulim/panamap-proto | 430bfbba022300b7166351682d99f38b097719c4 | [
"MIT"
] | null | null | null | panamap_proto/panamap_proto.py | kirillsulim/panamap-proto | 430bfbba022300b7166351682d99f38b097719c4 | [
"MIT"
] | null | null | null | from typing import Type, Any, Set, Callable, Dict, List, Optional
from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
from google.protobuf.message import Message
from google.protobuf.descriptor import FieldDescriptor, EnumDescriptor
from google.protobuf.pyext._message import RepeatedCompositeContainer
from panamap import MappingDescriptor
from panamap.panamap import T
class ProtoMappingDescriptor(MappingDescriptor):
FIELD_CODE_TO_PYTHON_TYPE = {
FieldDescriptor.TYPE_DOUBLE: float,
FieldDescriptor.TYPE_FLOAT: float,
FieldDescriptor.TYPE_INT64: int,
FieldDescriptor.TYPE_UINT64: int,
FieldDescriptor.TYPE_INT32: int,
FieldDescriptor.TYPE_FIXED64: int,
FieldDescriptor.TYPE_FIXED32: int,
FieldDescriptor.TYPE_BOOL: bool,
FieldDescriptor.TYPE_STRING: str,
FieldDescriptor.TYPE_GROUP: None,
FieldDescriptor.TYPE_MESSAGE: None,
FieldDescriptor.TYPE_BYTES: bytes,
FieldDescriptor.TYPE_UINT32: int,
FieldDescriptor.TYPE_ENUM: None,
FieldDescriptor.TYPE_SFIXED32: int,
FieldDescriptor.TYPE_SFIXED64: int,
FieldDescriptor.TYPE_SINT32: int,
FieldDescriptor.TYPE_SINT64: int,
}
@classmethod
def supports_type(cls, t: Type[Any]) -> bool:
if isinstance(t, EnumTypeWrapper):
return True
return issubclass(t, Message)
@classmethod
    def resolve_type_name(cls, t: Type[Any]) -> Optional[str]:
        if isinstance(t, EnumTypeWrapper):
            return t.DESCRIPTOR.name
        return None
def __init__(self, t: Type[T]):
super(ProtoMappingDescriptor, self).__init__(t)
descriptor = t.DESCRIPTOR
if isinstance(descriptor, EnumDescriptor):
self.field_names = set()
self.field_types = {}
else:
self.field_names = {field.name for field in t.DESCRIPTOR.fields}
self.field_types = {field.name: self._get_field_type(field) for field in t.DESCRIPTOR.fields}
def get_getter(self, field_name: str) -> Callable[[T], Any]:
def getter(t: T) -> Any:
value = getattr(t, field_name)
if isinstance(value, RepeatedCompositeContainer):
return list(value)
else:
return value
return getter
    def get_setter(self, field_name: str) -> Callable[[T, Any], None]:
def setter(t: T, value: Any) -> None:
if isinstance(value, Message):
getattr(t, field_name).CopyFrom(value)
else:
setattr(t, field_name, value)
return setter
def get_constructor_args(self) -> Set[str]:
return self.field_names
def get_required_constructor_args(self) -> Set[str]:
return set()
def get_declared_fields(self) -> Set[str]:
return self.field_names
def is_field_supported(self, field_name: str) -> bool:
return field_name in self.field_names
def get_preferred_field_type(self, field_name: str) -> Type[Any]:
return self.field_types.get(field_name, Any)
def is_container_type(self) -> bool:
return False
def _get_field_type(self, field: FieldDescriptor) -> Type[Any]:
repeated = field.label == FieldDescriptor.LABEL_REPEATED
if field.message_type is not None:
type = field.message_type._concrete_class
elif field.enum_type is not None:
type = field.enum_type.name
else:
code = field.type
field_type = self.FIELD_CODE_TO_PYTHON_TYPE.get(code)
if field_type is not None:
type = field_type
else:
type = Any
if repeated:
return List[type]
else:
return type
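# Usage sketch (illustrative; assumes the panamap Mapper accepts custom
# descriptor classes and exposes the mapping/register chain from the panamap
# docs -- MyProtoMessage and MyDataclass are hypothetical names):
# from panamap import Mapper
# mapper = Mapper(custom_descriptors=[ProtoMappingDescriptor])
# mapper.mapping(MyProtoMessage, MyDataclass).map_matching().register()
# dto = mapper.map(proto_instance, MyDataclass)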
| 34.297297 | 105 | 0.648542 |
4a1fec4d174ad23bfda4674e8f2f0928011dc51a | 5,468 | py | Python | entity_gym/entity_gym/examples/minefield.py | batu/incubator | 11f0f60de24102af4356c9738cbb9793ea6aa334 | [
"Apache-2.0",
"MIT"
] | null | null | null | entity_gym/entity_gym/examples/minefield.py | batu/incubator | 11f0f60de24102af4356c9738cbb9793ea6aa334 | [
"Apache-2.0",
"MIT"
] | null | null | null | entity_gym/entity_gym/examples/minefield.py | batu/incubator | 11f0f60de24102af4356c9738cbb9793ea6aa334 | [
"Apache-2.0",
"MIT"
] | 1 | 2022-03-30T14:40:03.000Z | 2022-03-30T14:40:03.000Z | from dataclasses import dataclass, field
import numpy as np
import random
from typing import Dict, List, Mapping, Tuple
from entity_gym.environment import (
CategoricalAction,
DenseCategoricalActionMask,
Entity,
Environment,
CategoricalActionSpace,
ActionSpace,
EpisodeStats,
ObsSpace,
Observation,
Action,
)
from entity_gym.dataclass_utils import obs_space_from_dataclasses, extract_features
@dataclass
class Vehicle:
x_pos: float = 0.0
y_pos: float = 0.0
direction: float = 0.0
step: int = 0
@dataclass
class Target:
x_pos: float = 0.0
y_pos: float = 0.0
@dataclass
class Mine:
x_pos: float = 0.0
y_pos: float = 0.0
@dataclass
class Minefield(Environment):
"""
Task with a Vehicle entity that has to reach a target point, receiving a reward of 1.
If the vehicle collides with any of the randomly placed mines, the episode ends without reward.
    The available actions turn the vehicle left, turn it right, or move it forward.
"""
vehicle: Vehicle = field(default_factory=Vehicle)
target: Target = field(default_factory=Target)
mine: Mine = field(default_factory=Mine)
max_mines: int = 10
max_steps: int = 200
translate: bool = False
width: float = 200.0
@classmethod
def obs_space(cls) -> ObsSpace:
return obs_space_from_dataclasses(Vehicle, Mine, Target)
@classmethod
def action_space(cls) -> Dict[str, ActionSpace]:
return {
"move": CategoricalActionSpace(
["turn left", "move forward", "turn right"],
)
}
def reset(self, obs_space: ObsSpace) -> Observation:
def randpos() -> Tuple[float, float]:
return (
random.uniform(-self.width / 2, self.width / 2),
random.uniform(-self.width / 2, self.width / 2),
)
self.vehicle.x_pos, self.vehicle.y_pos = randpos()
self.target.x_pos, self.target.y_pos = randpos()
mines: List[Mine] = []
for _ in range(self.max_mines):
x, y = randpos()
# Check that the mine is not too close to the vehicle, target, or any other mine
pos = [(m.x_pos, m.y_pos) for m in mines] + [
(self.vehicle.x_pos, self.vehicle.y_pos),
(self.target.x_pos, self.target.y_pos),
]
if any(map(lambda p: (x - p[0]) ** 2 + (y - p[1]) ** 2 < 15 * 15, pos)):
continue
mines.append(Mine(x, y))
self.vehicle.direction = random.uniform(0, 2 * np.pi)
self.step = 0
self.mines = mines
return self.observe(obs_space)
def _reset(self) -> Observation:
return self.reset(Minefield.obs_space())
def act(self, action: Mapping[str, Action], obs_filter: ObsSpace) -> Observation:
for action_name, a in action.items():
assert isinstance(a, CategoricalAction)
if action_name == "move":
move = a.actions[0][1]
if move == 0:
self.vehicle.direction -= np.pi / 8
elif move == 1:
self.vehicle.x_pos += 3 * np.cos(self.vehicle.direction)
self.vehicle.y_pos += 3 * np.sin(self.vehicle.direction)
elif move == 2:
self.vehicle.direction += np.pi / 8
else:
raise ValueError(
f"Invalid action {move} for action space {action_name}"
)
self.vehicle.direction %= 2 * np.pi
else:
raise ValueError(f"Unknown action type {action_name}")
self.step += 1
self.vehicle.step = self.step
return self.observe(obs_filter)
def _act(self, action: Mapping[str, Action]) -> Observation:
return self.act(
action,
Minefield.obs_space(),
)
def observe(self, obs_filter: ObsSpace, done: bool = False) -> Observation:
if (self.target.x_pos - self.vehicle.x_pos) ** 2 + (
self.target.y_pos - self.vehicle.y_pos
) ** 2 < 5 * 5:
done = True
reward = 1
elif (
any(
map(
lambda m: (self.vehicle.x_pos - m.x_pos) ** 2
+ (self.vehicle.y_pos - m.y_pos) ** 2
< 5 * 5,
self.mines,
)
)
or self.step >= self.max_steps
):
done = True
reward = 0
else:
done = False
reward = 0
if self.translate:
ox = self.vehicle.x_pos
oy = self.vehicle.y_pos
else:
ox = oy = 0
return Observation(
entities=extract_features(
{
"Mine": [Mine(m.x_pos - ox, m.y_pos - oy) for m in self.mines],
"Vehicle": [self.vehicle],
"Target": [Target(self.target.x_pos - ox, self.target.y_pos - oy)],
},
obs_filter,
),
action_masks={
"move": DenseCategoricalActionMask(actors=np.array([0]), mask=None),
},
ids=list(range(len(self.mines) + 2)),
reward=reward,
done=done,
end_of_episode_info=EpisodeStats(self.step, reward) if done else None,
)
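# Rollout sketch (illustrative; mirrors how act() reads a.actions[0][1] above
# and assumes CategoricalAction can wrap a list of (actor, choice) pairs):
# env = Minefield()
# obs = env._reset()
# while not obs.done:
#     obs = env._act({"move": CategoricalAction(actions=[(0, 1)])})  # forward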
| 31.606936 | 99 | 0.537857 |
4a1fed3e70c7fe0afb3cf1195010dd522bbaaf81 | 15,718 | py | Python | uncertainty_baselines/models/efficientnet.py | elisim/uncertainty-baselines | ba92f46114d6c49039f8150c142927cf9cba7a9e | [
"Apache-2.0"
] | null | null | null | uncertainty_baselines/models/efficientnet.py | elisim/uncertainty-baselines | ba92f46114d6c49039f8150c142927cf9cba7a9e | [
"Apache-2.0"
] | null | null | null | uncertainty_baselines/models/efficientnet.py | elisim/uncertainty-baselines | ba92f46114d6c49039f8150c142927cf9cba7a9e | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet model adopted from official estimator version for tf2.
[1] Mingxing Tan, Quoc V. Le
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
ICML'19, https://arxiv.org/abs/1905.11946.
"""
import collections
import functools
import math
import tensorflow as tf
from uncertainty_baselines.models import efficientnet_utils
BlockArgs = collections.namedtuple('BlockArgs', [
'kernel_size',
'num_repeat',
'input_filters',
'output_filters',
'expand_ratio',
'strides',
'se_ratio',
])
def efficientnet_params(model_name):
"""Get efficientnet params based on model name."""
params_dict = {
# (width_coefficient, depth_coefficient, resolution, dropout_rate)
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
'efficientnet-b8': (2.2, 3.6, 672, 0.5),
'efficientnet-l2': (4.3, 5.3, 800, 0.5),
}
return params_dict[model_name]
def round_filters(filters, width_coefficient, depth_divisor, min_depth):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
min_depth = min_depth or depth_divisor
new_filters = max(
min_depth,
int(filters + depth_divisor / 2) // depth_divisor * depth_divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += depth_divisor
return int(new_filters)
def round_repeats(repeats, depth_coefficient):
"""Round number of filters based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
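# Worked example of the two scaling helpers (values follow directly from the
# formulas above):
#   round_filters(32, width_coefficient=1.1, depth_divisor=8, min_depth=None)
#     -> 32 * 1.1 = 35.2, snapped to the nearest multiple of 8 = 32, and kept
#        because 32 >= 0.9 * 35.2.
#   round_repeats(2, depth_coefficient=1.1) -> ceil(2.2) = 3.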
class MBConvBlock(tf.keras.layers.Layer):
"""A class of MBConv: Mobile Inverted Residual Bottleneck."""
def __init__(self,
block_args,
batch_norm_momentum,
batch_norm_epsilon,
batch_norm,
data_format,
relu_fn,
use_se,
clip_projection_output):
"""Initializes a MBConv block.
Args:
block_args: BlockArgs, arguments to create a Block.
batch_norm_momentum: Momentum for batch normalization.
batch_norm_epsilon: Epsilon for batch normalization.
batch_norm: Batch norm layer.
data_format: Image data format.
relu_fn: Activation.
use_se: Whether to use squeeze and excitation layers.
clip_projection_output: Whether to clip projected conv outputs.
"""
super(MBConvBlock, self).__init__()
self._block_args = block_args
self._batch_norm_momentum = batch_norm_momentum
self._batch_norm_epsilon = batch_norm_epsilon
self._batch_norm = batch_norm
self._data_format = data_format
if self._data_format == 'channels_first':
self._channel_axis = 1
self._spatial_dims = [2, 3]
else:
self._channel_axis = -1
self._spatial_dims = [1, 2]
self._relu_fn = relu_fn
self._has_se = (
use_se and self._block_args.se_ratio is not None and
0 < self._block_args.se_ratio <= 1)
self._clip_projection_output = clip_projection_output
self._build()
def _build(self):
"""Builds block according to the arguments."""
filters = self._block_args.input_filters * self._block_args.expand_ratio
kernel_size = self._block_args.kernel_size
self._expand_conv = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=efficientnet_utils.conv_kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=False)
self._bn0 = self._batch_norm(
axis=self._channel_axis,
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon)
self._depthwise_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=[kernel_size, kernel_size],
strides=self._block_args.strides,
depthwise_initializer=efficientnet_utils.conv_kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=False)
self._bn1 = self._batch_norm(
axis=self._channel_axis,
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon)
if self._has_se:
num_reduced_filters = max(
1, int(self._block_args.input_filters * self._block_args.se_ratio))
self._se_reduce = tf.keras.layers.Conv2D(
num_reduced_filters,
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=efficientnet_utils.conv_kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=True)
self._se_expand = tf.keras.layers.Conv2D(
filters,
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=efficientnet_utils.conv_kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=True)
filters = self._block_args.output_filters
self._project_conv = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=efficientnet_utils.conv_kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=False)
self._bn2 = self._batch_norm(
axis=self._channel_axis,
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon)
def call(self, inputs, training=True, survival_prob=None):
"""Implementation of call().
Args:
inputs: the inputs tensor.
training: boolean, whether the model is constructed for training.
survival_prob: float, between 0 to 1, drop connect rate.
Returns:
      An output tensor.
"""
x = inputs
if self._block_args.expand_ratio != 1:
x = self._relu_fn(self._bn0(self._expand_conv(x), training=training))
x = self._relu_fn(self._bn1(self._depthwise_conv(x), training=training))
if self._has_se:
se_tensor = tf.reduce_mean(
x, self._spatial_dims, keepdims=True)
se_tensor = self._se_expand(self._relu_fn(self._se_reduce(se_tensor)))
x = tf.sigmoid(se_tensor) * x
x = self._bn2(self._project_conv(x), training=training)
# Add identity so that quantization-aware training can insert quantization
# ops correctly.
x = tf.identity(x)
if self._clip_projection_output:
x = tf.clip_by_value(x, -6, 6)
if all(
s == 1 for s in self._block_args.strides
) and self._block_args.input_filters == self._block_args.output_filters:
if survival_prob:
x = efficientnet_utils.drop_connect(x, training, survival_prob)
x = tf.add(x, inputs)
return x
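# Minimal usage sketch (illustrative; the input shape and argument values are
# assumptions, not taken from this file):
# block = MBConvBlock(
#     BlockArgs(kernel_size=3, num_repeat=1, input_filters=32,
#               output_filters=16, expand_ratio=1, strides=[1, 1],
#               se_ratio=0.25),
#     batch_norm_momentum=0.99, batch_norm_epsilon=1e-3,
#     batch_norm=tf.keras.layers.BatchNormalization,
#     data_format='channels_last', relu_fn=tf.nn.swish,
#     use_se=True, clip_projection_output=False)
# y = block(tf.zeros([1, 56, 56, 32]), training=False)  # -> [1, 56, 56, 16]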
class EfficientNetModel(tf.keras.Model):
"""EfficientNet."""
def __init__(self,
width_coefficient,
depth_coefficient,
dropout_rate,
batch_norm_momentum=0.99,
batch_norm_epsilon=1e-3,
survival_prob=0.8,
data_format='channels_last',
num_classes=1000,
depth_divisor=8,
min_depth=None,
relu_fn=tf.nn.swish,
# TPU-specific requirement.
batch_norm=tf.keras.layers.experimental.SyncBatchNormalization,
use_se=True,
clip_projection_output=False):
"""Initializes model instance.
Args:
width_coefficient: Coefficient to scale width.
depth_coefficient: Coefficient to scale depth.
dropout_rate: Dropout rate.
batch_norm_momentum: Momentum for batch normalization.
batch_norm_epsilon: Epsilon for batch normalization.
survival_prob: float, survival probability for stochastic depth.
data_format: Image data format.
num_classes: Number of output classes.
depth_divisor: Divisor to divide filters per conv when rounding.
min_depth: Minimum depth per conv when rounding filters.
relu_fn: Activation.
batch_norm: Batch norm layer.
use_se: Whether to use squeeze and excitation layers.
clip_projection_output: Whether to clip projected conv outputs.
"""
super(EfficientNetModel, self).__init__()
self._width_coefficient = width_coefficient
self._depth_coefficient = depth_coefficient
self._dropout_rate = dropout_rate
self._batch_norm_momentum = batch_norm_momentum
self._batch_norm_epsilon = batch_norm_epsilon
self._survival_prob = survival_prob
self._data_format = data_format
self._num_classes = num_classes
self._depth_divisor = depth_divisor
self._min_depth = min_depth
self._relu_fn = relu_fn
self._batch_norm = batch_norm
self._use_se = use_se
self._clip_projection_output = clip_projection_output
self._build()
def _build(self):
"""Builds a model."""
if self._data_format == 'channels_first':
channel_axis = 1
self._spatial_dims = [2, 3]
else:
channel_axis = -1
self._spatial_dims = [1, 2]
self._conv_stem = tf.keras.layers.Conv2D(
filters=round_filters(32,
self._width_coefficient,
self._depth_divisor,
self._min_depth),
kernel_size=[3, 3],
strides=[2, 2],
kernel_initializer=efficientnet_utils.conv_kernel_initializer,
padding='same',
data_format=self._data_format,
use_bias=False)
self._bn0 = self._batch_norm(
axis=channel_axis,
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon)
Block = functools.partial( # pylint: disable=invalid-name
MBConvBlock,
batch_norm_momentum=self._batch_norm_momentum,
batch_norm_epsilon=self._batch_norm_epsilon,
batch_norm=self._batch_norm,
data_format=self._data_format,
relu_fn=self._relu_fn,
use_se=self._use_se,
clip_projection_output=self._clip_projection_output)
self._blocks = []
blocks_args = [
BlockArgs(kernel_size=3,
num_repeat=1,
input_filters=32,
output_filters=16,
expand_ratio=1,
strides=[1, 1],
se_ratio=0.25),
BlockArgs(kernel_size=3,
num_repeat=2,
input_filters=16,
output_filters=24,
expand_ratio=6,
strides=[2, 2],
se_ratio=0.25),
BlockArgs(kernel_size=5,
num_repeat=2,
input_filters=24,
output_filters=40,
expand_ratio=6,
strides=[2, 2],
se_ratio=0.25),
BlockArgs(kernel_size=3,
num_repeat=3,
input_filters=40,
output_filters=80,
expand_ratio=6,
strides=[2, 2],
se_ratio=0.25),
BlockArgs(kernel_size=5,
num_repeat=3,
input_filters=80,
output_filters=112,
expand_ratio=6,
strides=[1, 1],
se_ratio=0.25),
BlockArgs(kernel_size=5,
num_repeat=4,
input_filters=112,
output_filters=192,
expand_ratio=6,
strides=[2, 2],
se_ratio=0.25),
BlockArgs(kernel_size=3,
num_repeat=1,
input_filters=192,
output_filters=320,
expand_ratio=6,
strides=[1, 1],
se_ratio=0.25),
]
for block_args in blocks_args:
# Update block input and output filters based on depth multiplier.
input_filters = round_filters(block_args.input_filters,
self._width_coefficient,
self._depth_divisor,
self._min_depth)
output_filters = round_filters(block_args.output_filters,
self._width_coefficient,
self._depth_divisor,
self._min_depth)
repeats = round_repeats(block_args.num_repeat,
self._depth_coefficient)
block_args = block_args._replace(
input_filters=input_filters,
output_filters=output_filters,
num_repeat=repeats)
self._blocks.append(Block(block_args))
if block_args.num_repeat > 1:
# pylint: disable=protected-access
block_args = block_args._replace(
input_filters=block_args.output_filters, strides=[1, 1])
# pylint: enable=protected-access
for _ in range(block_args.num_repeat - 1):
self._blocks.append(Block(block_args))
self._conv_head = tf.keras.layers.Conv2D(
filters=round_filters(1280,
self._width_coefficient,
self._depth_divisor,
self._min_depth),
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=efficientnet_utils.conv_kernel_initializer,
padding='same',
use_bias=False)
self._bn1 = self._batch_norm(
axis=channel_axis,
momentum=self._batch_norm_momentum,
epsilon=self._batch_norm_epsilon)
self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D(
data_format=self._data_format)
if self._dropout_rate > 0:
self._dropout = tf.keras.layers.Dropout(self._dropout_rate)
else:
self._dropout = None
self._fc = tf.keras.layers.Dense(
self._num_classes,
kernel_initializer=efficientnet_utils.dense_kernel_initializer)
def call(self, inputs, training=True):
"""Implementation of call().
Args:
inputs: input tensors.
training: boolean, whether the model is constructed for training.
Returns:
output tensors.
"""
outputs = self._relu_fn(
self._bn0(self._conv_stem(inputs), training=training))
for idx, block in enumerate(self._blocks):
survival_prob = self._survival_prob
if survival_prob:
drop_rate = 1.0 - survival_prob
survival_prob = 1.0 - drop_rate * float(idx) / len(self._blocks)
outputs = block.call(
outputs, training=training, survival_prob=survival_prob)
outputs = self._relu_fn(
self._bn1(self._conv_head(outputs), training=training))
outputs = self._avg_pooling(outputs)
if self._dropout:
outputs = self._dropout(outputs, training=training)
outputs = self._fc(outputs)
return outputs
def create_model(*args, **kwargs):
return EfficientNetModel(*args, **kwargs)
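# Usage sketch (illustrative): build a B0-sized model from the params table.
# width, depth, _, dropout = efficientnet_params('efficientnet-b0')
# model = create_model(width, depth, dropout, num_classes=1000)
# logits = model(tf.zeros([1, 224, 224, 3]), training=False)  # -> (1, 1000)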
| 35.722727 | 78 | 0.631314 |
4a1fef034429ec4838eddb32ea986dd396b6ed92 | 6,703 | py | Python | tests/unit_tests/test_crypto/test_crypto.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | null | null | null | tests/unit_tests/test_crypto/test_crypto.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | null | null | null | tests/unit_tests/test_crypto/test_crypto.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | null | null | null | from unittest import TestCase
from unittest.mock import patch
from parameterized import parameterized
from pytezos.crypto.key import Key
class TestCrypto(TestCase):
"""
Test data generation:
./tezos-client gen keys test_ed25519 -s ed25519 --force (--encrypted)
./tezos-client gen keys test_secp256k1 -s secp256k1 --force (--encrypted)
./tezos-client gen keys test_p256 -s p256 --force (--encrypted)
./tezos-client show address test_ed25519 -S
./tezos-client show address test_secp256k1 -S
./tezos-client show address test_p256 -S
./tezos-client sign bytes 0x74657374 for test_ed25519
./tezos-client sign bytes 0x74657374 for test_secp256k1
./tezos-client sign bytes 0x74657374 for test_p256
Issues:
* `tezos-client sign bytes` does not support P256 curve
"""
@parameterized.expand([
('edsk3nM41ygNfSxVU4w1uAW3G9EnTQEB5rjojeZedLTGmiGRcierVv',
'edpku976gpuAD2bXyx1XGraeKuCo1gUZ3LAJcHM12W1ecxZwoiu22R',
'tz1eKkWU5hGtfLUiqNpucHrXymm83z3DG9Sq'),
('spsk1zkqrmst1yg2c4xi3crWcZPqgdc9KtPtb9SAZWYHAdiQzdHy7j',
'sppk7aMNM3xh14haqEyaxNjSt7hXanCDyoWtRcxF8wbtya859ak6yZT',
'tz28YZoayJjVz2bRgGeVjxE8NonMiJ3r2Wdu'),
('p2sk3PM77YMR99AvD3fSSxeLChMdiQ6kkEzqoPuSwQqhPsh29irGLC',
'p2pk679D18uQNkdjpRxuBXL5CqcDKTKzsiXVtc9oCUT6xb82zQmgUks',
'tz3agP9LGe2cXmKQyYn6T68BHKjjktDbbSWX'),
('p2sk2rHNfHbuqq1Q6RZAnXfwoA3fFk1xtUFPrNVj7mhwxmvY4xmrEd',
'p2pk663exKaDHnzFmUeBsmYjKUMJYPyW1WQJzmhyYgNrUuo5Ef9SXxG',
'tz3VqqyCrvZni4jbxVrzG2EeVQ97D9LARjJz'),
('p2sk2Md6rioE62a7hVdD8xdYGDLH2erDbAcD4i15e8DSpnHruhVHBw',
'p2pk66yEDuRC5RLHpVj8hvAS5fr8HnU2YsLvFNdwQoW3jH8WUynMwGG',
'tz3Q2KTKWw3xqiowvfX4N7gyyAfCz8hTvcnk')
])
def test_derive_key_data(self, sk, pk, pkh):
public_key = Key.from_encoded_key(pk)
self.assertFalse(public_key.is_secret)
self.assertEqual(pk, public_key.public_key())
self.assertEqual(pkh, public_key.public_key_hash())
secret_key = Key.from_encoded_key(sk)
self.assertTrue(secret_key.is_secret)
self.assertEqual(pk, secret_key.public_key())
self.assertEqual(sk, secret_key.secret_key())
@parameterized.expand([
('edpku976gpuAD2bXyx1XGraeKuCo1gUZ3LAJcHM12W1ecxZwoiu22R', b'test',
'edsigtzLBGCyadERX1QsYHKpwnxSxEYQeGLnJGsSkHEsyY8vB5GcNdnvzUZDdFevJK7YZQ2ujwVjvQZn62ahCEcy74AwtbA8HuN'),
('sppk7aMNM3xh14haqEyaxNjSt7hXanCDyoWtRcxF8wbtya859ak6yZT', b'test',
'spsig1RriZtYADyRhyNoQMa6AiPuJJ7AUDcrxWZfgqexzgANqMv4nXs6qsXDoXcoChBgmCcn2t7Y3EkJaVRuAmNh2cDDxWTdmsz'),
('p2pk67wVncLFS1DQDm2gVR45sYCzQSXTtqn3bviNYXVCq6WRoqtxHXL',
'017a06a770000508440322bf4860a065d1c8747a08f7685be9c79da2b21d5930c12fff86b230081d223b000000005c752b3'
'a04bc5b950ff781580616c12a646af98285da66232b232661f179c98d6f8c8912ae00000011000000010000000008000000'
'00009b55bda7ad9debcd2657b76d444b14807c7b5dc13e06f754e2b43186d0fb22b3d3332c0000000000031048815b00',
'sigqWxz3GKFXg6G8ndSzJF8JD9j7m12kPWZj6bHLqdKw6XpxhVLwGm26hVqMdEfgPdoz8qoA5QkM9mvnMyMFmYny9sqjb5bE'),
('p2pk66n1NmhPDEkcf9sXEKe9kBoTwBoTYxke1hx16aTRVq8MoXuwNqo',
'027a06a770ad828485977947451e23e99f5040ead0f09ef89f58be2583640edcb1e295d0cb000005085e',
'sigQVTY9CkYw8qL6Xa7QWestkLSdtPv6HZ4ToSMHDcRot3BwRGwZhSwXd9jJwKkDvvotTLSNWQdUqiDSfXuCNUfjbEaY2j6j')
])
def test_verify_ext_signatures(self, pk, msg, sig):
key = Key.from_encoded_key(pk)
key.verify(sig, msg)
self.assertRaises(ValueError, key.verify, sig, b'fake')
@parameterized.expand([
('edsk3nM41ygNfSxVU4w1uAW3G9EnTQEB5rjojeZedLTGmiGRcierVv', '0xdeadbeaf'),
('spsk1zkqrmst1yg2c4xi3crWcZPqgdc9KtPtb9SAZWYHAdiQzdHy7j', b'hello'),
('p2sk3PM77YMR99AvD3fSSxeLChMdiQ6kkEzqoPuSwQqhPsh29irGLC', b'test')
])
def test_sign_and_verify(self, sk, msg):
key = Key.from_encoded_key(sk)
sig = key.sign(msg)
key.verify(sig, msg)
self.assertRaises(ValueError, key.verify, sig, b'fake')
@parameterized.expand([
('edsk3nM41ygNfSxVU4w1uAW3G9EnTQEB5rjojeZedLTGmiGRcierVv', b'test',
'edsigtzLBGCyadERX1QsYHKpwnxSxEYQeGLnJGsSkHEsyY8vB5GcNdnvzUZDdFevJK7YZQ2ujwVjvQZn62ahCEcy74AwtbA8HuN'),
('spsk1zkqrmst1yg2c4xi3crWcZPqgdc9KtPtb9SAZWYHAdiQzdHy7j', b'test',
'spsig1RriZtYADyRhyNoQMa6AiPuJJ7AUDcrxWZfgqexzgANqMv4nXs6qsXDoXcoChBgmCcn2t7Y3EkJaVRuAmNh2cDDxWTdmsz'),
])
def test_deterministic_signatures(self, sk, msg, sig):
"""
See RFC6979 for explanation
https://tools.ietf.org/html/rfc6979#section-3.2
"""
key = Key.from_encoded_key(sk)
signature = key.sign(msg)
self.assertEqual(sig, signature)
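    # Determinism sketch (follows from RFC6979: the nonce is derived from the
    # key and the message, so signing the same message twice is reproducible):
    #   key = Key.from_encoded_key(sk)
    #   assert key.sign(msg) == key.sign(msg)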
@parameterized.expand([
('edesk1zxaPJkhNGSzgZDDSphvPzSNrnbmqes8xzUrw1wdFxdRT7ePiQz8D2Q18fMjn6fC9ZRS2rUbg8d8snxxznE',
'qqq', b'\xf2h\xbb\xf5\xc7\xe2\xb9\x97', 'edpktmNJub2v7tVjSU8nA9jZrdV5JezmFtZA4yd3jj18i6VKcCJzdo'),
('spesk21cruoqtYmxfq5fpkXiZZRLRw4vh7VFJauGCAgHxZf3q6Q5LTv9m9dnMxyVjna6RzWQL45q4ppGLh97xZpV',
'qqq', b'\xbe\xb8\xeefi\x14\\T', 'sppk7Zbcqfy67b6pRMAKax5QKzAxTQUxmfQcCuvn1QMFQsXqy1NkSkz'),
('p2esk1rqdHRPz4xQh8uP8JaWSVnGFTKxkh2utdjK5CPDTXAzzh5sXnnobLkGrXEZzGhCKFDSjv8Ggrjt7PnobRzs',
'qqq', b'"\xf8\x0e \x0f]hc', 'p2pk68Ky2h9UZZ4jUYws8mU8Cazhu4H1LdK22wD8HgDPRSvsJPBDtJ7'),
])
def test_encrypted_keys(self, sk, passphrase, salt, pk):
key = Key.from_encoded_key(sk, passphrase=passphrase)
self.assertEqual(pk, key.public_key())
with patch('pytezos.crypto.key.pysodium.randombytes', return_value=salt):
self.assertEqual(sk, key.secret_key(passphrase))
@parameterized.expand([
('eight life cycle hub response suffer useless custom drama baby royal embrace door',),
('eight life cycle hub response suffer useless custom drama baby royal embrace door on tw',),
('eight life cycle hub response suffer useless custom drama baby royal embrace door duck dog',),
])
def test_bad_mnemonic(self, mnemonic):
self.assertRaises(ValueError, Key.from_mnemonic, mnemonic)
def test_regression_p256_short_sig(self):
key = Key.from_encoded_key('p2sk3xPfYsoExTVi7bGSH2KoHgpxFNqewUczHkLtQvr1bwnbhzGM9Y')
key.sign('try25')
def test_encrypted_key_str_password(self):
key = Key.from_encoded_key(
key='edesk1UrFQK6xJM6SYdLxMQbyKaaYQmzYVvQRpJXUmxj3apZ1ufRu4aHSTqWrJiqcHywSbnF146wkNcpUAW7Qy6H',
passphrase='12345')
self.assertEqual('edsk2juUM8ZMUkaCKHWVnzWhp9DxrK93YK1rQjYk3pTEq2ThXpBxkX', key.secret_key())
| 51.561538 | 112 | 0.75414 |
4a1fef8a5c440e057ef938ed0f0d7f7ccf3d781f | 4,572 | py | Python | pysnmp/CXLANIOGEN-PORT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/CXLANIOGEN-PORT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/CXLANIOGEN-PORT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CXLANIOGEN-PORT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CXLANIOGEN-PORT-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:17:23 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
cxLanIoPort, = mibBuilder.importSymbols("CXProduct-SMI", "cxLanIoPort")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Gauge32, Unsigned32, iso, NotificationType, Bits, Integer32, ObjectIdentity, Counter64, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Counter32, IpAddress, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Unsigned32", "iso", "NotificationType", "Bits", "Integer32", "ObjectIdentity", "Counter64", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Counter32", "IpAddress", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class PhysAddress(OctetString):
pass
cxLanIoGenPortTable = MibTable((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 10, 2), )
if mibBuilder.loadTexts: cxLanIoGenPortTable.setStatus('mandatory')
cxLanIoGenPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 10, 2, 1), ).setIndexNames((0, "CXLANIOGEN-PORT-MIB", "cxLanIoGenPortIndex"))
if mibBuilder.loadTexts: cxLanIoGenPortEntry.setStatus('mandatory')
cxLanIoGenPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 10, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cxLanIoGenPortIndex.setStatus('mandatory')
cxLanIoGenMacAddrSrc = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 10, 2, 1, 2), PhysAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cxLanIoGenMacAddrSrc.setStatus('mandatory')
cxLanIoGenMacAddrDst = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 10, 2, 1, 3), PhysAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cxLanIoGenMacAddrDst.setStatus('mandatory')
cxLanIoGenType = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 10, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("invalid", 1), ("disabled", 2), ("internalLoopbackLevel1", 3), ("internalLoopbackLevel2", 4), ("noLoopbackFrameVerify", 5), ("noLoopbackFrameForward", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cxLanIoGenType.setStatus('mandatory')
cxLanIoGenDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 10, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cxLanIoGenDelay.setStatus('mandatory')
cxLanIoGenFrameSize = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 10, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(64, 4096))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cxLanIoGenFrameSize.setStatus('mandatory')
cxLanIoGenStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 10, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("disabled", 2), ("internalLoopbackLevel1", 3), ("internalLoopbackLevel2", 4), ("noLoopbackFrameVerify", 5), ("noLoopbackFrameForward", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cxLanIoGenStatus.setStatus('mandatory')
cxLanIoGenRxError = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 10, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cxLanIoGenRxError.setStatus('mandatory')
mibBuilder.exportSymbols("CXLANIOGEN-PORT-MIB", cxLanIoGenPortEntry=cxLanIoGenPortEntry, cxLanIoGenDelay=cxLanIoGenDelay, cxLanIoGenMacAddrSrc=cxLanIoGenMacAddrSrc, cxLanIoGenFrameSize=cxLanIoGenFrameSize, cxLanIoGenMacAddrDst=cxLanIoGenMacAddrDst, cxLanIoGenRxError=cxLanIoGenRxError, cxLanIoGenPortTable=cxLanIoGenPortTable, cxLanIoGenType=cxLanIoGenType, PhysAddress=PhysAddress, cxLanIoGenPortIndex=cxLanIoGenPortIndex, cxLanIoGenStatus=cxLanIoGenStatus)
| 117.230769 | 477 | 0.76881 |
4a1ff12a2ac4869f32f8c00e57168a64ca8236a8 | 8,946 | py | Python | tensor2tensor/envs/trajectory.py | shankharaj29/tensor2tensor | b89ba51a6fa9e0c20009cfb57ee8de04f7138392 | [
"Apache-2.0"
] | 1 | 2019-02-16T10:39:45.000Z | 2019-02-16T10:39:45.000Z | tensor2tensor/envs/trajectory.py | PedroLelis/tensor2tensor | 5a867d031bd493eeb7d2776e1118d1594ff0a623 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/envs/trajectory.py | PedroLelis/tensor2tensor | 5a867d031bd493eeb7d2776e1118d1594ff0a623 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trajectory manages a sequence of TimeSteps.
BatchTrajectory manages a batch of trajectories, also keeping track of
completed trajectories.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.envs import time_step
class Trajectory(object):
"""Basically a list of TimeSteps with convenience methods."""
def __init__(self):
# Contains a list of time steps.
self._time_steps = []
def __str__(self):
if not self.time_steps:
return "Trajectory[]"
return "Trajectory[{}]".format(", ".join(str(ts) for ts in self.time_steps))
def add_time_step(self, **create_time_step_kwargs):
"""Creates a time-step and appends it to the list.
Args:
**create_time_step_kwargs: Forwarded to
time_step.TimeStep.create_time_step.
"""
ts = time_step.TimeStep.create_time_step(**create_time_step_kwargs)
assert isinstance(ts, time_step.TimeStep)
self._time_steps.append(ts)
def change_last_time_step(self, **replace_time_step_kwargs):
"""Replace the last time-steps with the given kwargs."""
# Pre-conditions: self._time_steps shouldn't be empty.
assert self._time_steps
self._time_steps[-1] = self._time_steps[-1].replace(
**replace_time_step_kwargs)
def last_time_step(self):
# Pre-conditions: self._time_steps shouldn't be empty.
assert self._time_steps
return self._time_steps[-1]
# We could have overridden __nonzero__ or __bool__ as well.
def num_time_steps(self):
return len(self._time_steps)
def is_active(self):
return bool(self.num_time_steps())
@property
def time_steps(self):
return self._time_steps
class BatchTrajectory(object):
"""Basically a batch of active trajectories and a list of completed ones."""
def __init__(self, batch_size=1):
self.batch_size = batch_size
# Stores trajectories that are currently active, i.e. aren't done or reset.
self._trajectories = [Trajectory() for _ in range(self.batch_size)]
# Stores trajectories that are completed.
# NOTE: We don't track the index this came from, as it's not needed, right?
self._completed_trajectories = []
def reset_batch_trajectories(self):
self.__init__(batch_size=self.batch_size)
def __str__(self):
string = "BatchTrajectory["
for i, t in enumerate(self.trajectories):
string += "Trajectory {} = {}\n".format(i, str(t))
for i, t in enumerate(self.completed_trajectories):
string += "Completed Trajectory {} = {}\n".format(i, str(t))
return string + "]"
@property
def trajectories(self):
return self._trajectories
@property
def completed_trajectories(self):
return self._completed_trajectories
def _complete_trajectory(self, trajectory, index):
"""Completes the given trajectory at the given index."""
assert isinstance(trajectory, Trajectory)
# This *should* be the case.
assert trajectory.last_time_step().action is None
# Add to completed trajectories.
self._completed_trajectories.append(trajectory)
# Make a new one to replace it.
self._trajectories[index] = Trajectory()
def reset(self, indices, observations):
"""Resets trajectories at given indices and populates observations.
Reset can either be called right at the beginning, when there are no
time-steps, or to reset a currently active trajectory.
If resetting a currently active trajectory then we save it in
self._completed_trajectories.
Args:
indices: 1-D np.ndarray stating the indices to reset.
observations: np.ndarray of shape (indices len, obs.shape) of observations
"""
# Pre-conditions: indices, observations are np arrays.
# : indices is one-dimensional.
# : their first dimension (batch) is the same.
assert isinstance(indices, np.ndarray)
assert len(indices.shape) == 1
assert isinstance(observations, np.ndarray)
assert indices.shape[0] == observations.shape[0]
for index, observation in zip(indices, observations):
trajectory = self._trajectories[index]
# Are we starting a new trajectory at the given index?
if not trajectory.is_active():
# Then create a new time-step here with the given observation.
trajectory.add_time_step(observation=observation)
# That's all we need to do here.
continue
# If however we are resetting a currently active trajectory then we need
# to put that in self._completed_trajectories and make a new trajectory
# with the current observation.
# TODO(afrozm): Should we mark these are done? Or is the done=False and
# this being the last time-step in the trajectory good enough to recognize
# that this was reset?
# Mark trajectory as completed and move into completed_trajectories.
self._complete_trajectory(trajectory, index)
# Put the observation in the newly created trajectory.
# TODO(afrozm): Add 0 reward.
self._trajectories[index].add_time_step(observation=observation)
def complete_all_trajectories(self):
"""Essentially same as reset, but we don't have observations."""
for index in range(self.batch_size):
trajectory = self._trajectories[index]
assert trajectory.is_active()
self._complete_trajectory(trajectory, index)
def step(self, observations, raw_rewards, processed_rewards, dones, actions):
"""Record the information obtained from taking a step in all envs.
Records (observation, rewards, done) in a new time-step and actions in the
current time-step.
If any trajectory gets done, we move that trajectory to
completed_trajectories.
Args:
observations: ndarray of first dimension self.batch_size, which has the
observations after we've stepped, i.e. s_{t+1} where t is the current
state.
raw_rewards: ndarray of first dimension self.batch_size containing raw
rewards i.e. r_{t+1}.
processed_rewards: ndarray of first dimension self.batch_size containing
processed rewards. i.e. r_{t+1}
dones: ndarray of first dimension self.batch_size, containing true at an
index if that env is done, i.e. d_{t+1}
actions: ndarray of first dimension self.batch_size, containing actions
applied at the current time-step, which leads to the observations
rewards and done at the next time-step, i.e. a_t
"""
# Pre-conditions
assert isinstance(observations, np.ndarray)
assert isinstance(raw_rewards, np.ndarray)
assert isinstance(processed_rewards, np.ndarray)
assert isinstance(dones, np.ndarray)
assert isinstance(actions, np.ndarray)
# We assume that we step in all envs, i.e. not like reset where we can reset
# some envs and not others.
assert self.batch_size == observations.shape[0]
assert self.batch_size == raw_rewards.shape[0]
assert self.batch_size == processed_rewards.shape[0]
assert self.batch_size == dones.shape[0]
assert self.batch_size == actions.shape[0]
for index in range(self.batch_size):
trajectory = self._trajectories[index]
# NOTE: If the trajectory isn't active, that means it doesn't have any
# time-steps in it, but we are in step, so the assumption is that it has
# a prior observation from which we are stepping away from.
# TODO(afrozm): Let's re-visit this if it becomes too restrictive.
assert trajectory.is_active()
# To this trajectory's last time-step, set actions.
trajectory.change_last_time_step(action=actions[index])
# Create a new time-step to add observation, done & rewards (no actions).
trajectory.add_time_step(
observation=observations[index],
done=dones[index],
raw_reward=raw_rewards[index],
processed_reward=processed_rewards[index])
# If the trajectory is completed, i.e. dones[index] == True, then we
# account for it right-away.
if dones[index]:
self._complete_trajectory(trajectory, index)
# NOTE: The new trajectory at `index` is going to be in-active and
# `reset` should be called on it.
assert not self._trajectories[index].is_active()
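# Usage sketch (illustrative; observation shapes are arbitrary):
# bt = BatchTrajectory(batch_size=2)
# bt.reset(np.arange(2), np.zeros((2, 4)))
# bt.step(observations=np.ones((2, 4)),
#         raw_rewards=np.zeros(2), processed_rewards=np.zeros(2),
#         dones=np.array([False, True]), actions=np.zeros(2))
# assert len(bt.completed_trajectories) == 1  # the done env rolled over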
| 36.663934 | 80 | 0.708138 |
4a1ff17aeaa5f8eaaaf5fd08242924058f96b020 | 965 | py | Python | unittest_reinvent/library_design/__init__.py | MolecularAI/reinvent-chemistry | bf0235bc2b1168b1db54c1e04bdba04b166ab7bf | [
"MIT"
] | null | null | null | unittest_reinvent/library_design/__init__.py | MolecularAI/reinvent-chemistry | bf0235bc2b1168b1db54c1e04bdba04b166ab7bf | [
"MIT"
] | null | null | null | unittest_reinvent/library_design/__init__.py | MolecularAI/reinvent-chemistry | bf0235bc2b1168b1db54c1e04bdba04b166ab7bf | [
"MIT"
] | 1 | 2022-03-22T15:24:13.000Z | 2022-03-22T15:24:13.000Z | from unittest_reinvent.library_design.aizynth import TestAiZynthClient
from unittest_reinvent.library_design.reaction_definitions import TestStandardDefinitions, TestBuildingBlocks
from unittest_reinvent.library_design.reaction_filters import *
from unittest_reinvent.library_design.test_attachment_points import TestAttachmentPoints
from unittest_reinvent.library_design.test_bond_maker import TestBondMaker
from unittest_reinvent.library_design.test_failing_reactions_enumerator import TestFailingReactionsEnumerator, \
TestNonFailingReactionsEnumerator
from unittest_reinvent.library_design.test_fragment_filter import TestFragmentFilter
from unittest_reinvent.library_design.test_fragment_reactions import TestFragmentReactions
from unittest_reinvent.library_design.test_fragment_reactions_slice_enumerator import \
TestSingleFragmentReactionsSliceEnumerator, TestMultipleFragmentReactionsSliceEnumerator, \
TestReactionsSliceEnumeratorWithFilters
| 68.928571 | 112 | 0.915026 |
4a1ff39fdac153038308cf02fdf0eabd46d85a63 | 11,680 | py | Python | gooddata-metadata-client/gooddata_metadata_client/model/inline_response200_options.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 7 | 2022-01-24T16:27:06.000Z | 2022-02-25T10:18:49.000Z | gooddata-metadata-client/gooddata_metadata_client/model/inline_response200_options.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 29 | 2022-01-20T15:45:38.000Z | 2022-03-31T09:39:25.000Z | gooddata-metadata-client/gooddata_metadata_client/model/inline_response200_options.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 7 | 2022-01-20T07:11:15.000Z | 2022-03-09T14:50:17.000Z | """
OpenAPI definition
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from gooddata_metadata_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from gooddata_metadata_client.exceptions import ApiAttributeError
def lazy_import():
from gooddata_metadata_client.model.inline_response200_options_links import InlineResponse200OptionsLinks
globals()['InlineResponse200OptionsLinks'] = InlineResponse200OptionsLinks
class InlineResponse200Options(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'description': (str,), # noqa: E501
'links': (InlineResponse200OptionsLinks,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'description': 'description', # noqa: E501
'links': 'links', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse200Options - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
description (str): [optional] # noqa: E501
links (InlineResponse200OptionsLinks): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse200Options - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
description (str): [optional] # noqa: E501
links (InlineResponse200OptionsLinks): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 43.745318 | 124 | 0.579195 |
4a1ff3c784d0b482fa31ae2221cacc2626124244 | 10,333 | py | Python | baselines/baselines_keras_rnn.py | dainis-boumber/amamda | 6690c96b883815db5c1cb00ddac756e075c119aa | [
"MIT"
] | 2 | 2018-02-17T17:18:52.000Z | 2019-11-24T09:37:50.000Z | baselines/baselines_keras_rnn.py | dainis-boumber/amamda | 6690c96b883815db5c1cb00ddac756e075c119aa | [
"MIT"
] | null | null | null | baselines/baselines_keras_rnn.py | dainis-boumber/amamda | 6690c96b883815db5c1cb00ddac756e075c119aa | [
"MIT"
] | null | null | null | import logging
from pathlib import Path
import numpy as np
np.random.seed(1337)
import keras
from keras import backend as K
from keras import regularizers
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from keras.layers import Input
from keras.layers import Embedding
from keras.layers import Conv1D
from keras.layers import GRU
from keras.layers import MaxPooling1D
from keras.layers import Flatten
from keras.layers import Dense
from keras.models import Model
from keras.layers import Dropout
from keras.callbacks import Callback
from data.DataBuilderML400 import DataBuilderML400
from data.DataBuilderPan import DataBuilderPan
from data.base import DataBuilder
import matplotlib.pyplot as pyplot
class roc_callback(Callback):
def __init__(self, training_data, validation_data):
self.x = training_data[0]
self.y = training_data[1]
self.x_val = validation_data[0]
self.y_val = validation_data[1]
def on_train_begin(self, logs=None):
return
def on_train_end(self, logs=None):
return
def on_epoch_begin(self, epoch, logs=None):
return
def on_epoch_end(self, epoch, logs=None):
y_pred = self.model.predict(self.x)
roc = roc_auc_score(self.y, y_pred)
if "train_roc" in self.model.history.history:
self.model.history.history["train_roc"].append(roc)
else:
self.model.history.history["train_roc"] = [roc]
y_pred_val = self.model.predict(self.x_val)
roc_val = roc_auc_score(self.y_val, y_pred_val)
if "val_roc" in self.model.history.history:
self.model.history.history["val_roc"].append(roc_val)
else:
self.model.history.history["val_roc"] = [roc_val]
        print('\rroc-auc: %s - roc-auc_val: %s'
              % (str(round(roc, 4)), str(round(roc_val, 4))),
              end=100 * ' ' + '\n')
return
def on_batch_begin(self, batch, logs=None):
return
def on_batch_end(self, batch, logs=None):
return
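# Usage sketch (illustrative): attach the callback so ROC AUC is computed on
# both splits at the end of every epoch (try_pan below does exactly this):
# model.fit(x, y, validation_data=(x_val, y_val),
#           callbacks=[roc_callback(training_data=(x, y),
#                                   validation_data=(x_val, y_val))])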
def custom_activation(x):
    # Element-wise clamp at zero; K.max would reduce over an axis instead.
    return K.maximum(K.sqrt(x + 1) - 1, 0.)
def rnn_concat(data_builder: DataBuilder):
logging.info("BUILDING RNN USING CONCATENATION")
embedding_layer = Embedding(input_length=data_builder.target_doc_len,
input_dim=data_builder.vocabulary_size + 1,
output_dim=100,
weights=[data_builder.embed_matrix],
trainable=False,
mask_zero=True,
name="embedding_layer")
k_input = Input(shape=(data_builder.target_doc_len,), dtype='int32', name="k_doc_input")
k_embedded_seq = embedding_layer(k_input)
u_input = Input(shape=(data_builder.target_doc_len,), dtype='int32', name="u_doc_input")
u_embedded_seq = embedding_layer(u_input)
# shared first conv
gru_layer = GRU(units=64, name="gru_layer",
dropout=0.3, recurrent_dropout=0.3,
reset_after=True
)
k_feat = gru_layer(k_embedded_seq)
u_feat = gru_layer(u_embedded_seq)
all_feat = keras.layers.concatenate([k_feat, u_feat])
# all_feat = Dense(32, activation='relu')(all_feat)
# all_feat = Dropout(rate=0.3)(all_feat)
preds = Dense(1, activation='sigmoid')(all_feat)
model = Model([k_input, u_input], preds)
model.compile(loss='binary_crossentropy',
optimizer='adadelta',
metrics=['acc'])
return model
def rnn_subtract(data_builder: DataBuilder):
logging.info("BUILDING RNN USING OUTER PRODUCT")
embedding_layer = Embedding(input_length=data_builder.target_doc_len,
input_dim=data_builder.vocabulary_size + 1,
output_dim=100,
weights=[data_builder.embed_matrix],
trainable=False,
mask_zero=True,
name="embedding_layer")
k_input = Input(shape=(data_builder.target_doc_len,), dtype='int32', name="k_doc_input")
k_embedded_seq = embedding_layer(k_input)
u_input = Input(shape=(data_builder.target_doc_len,), dtype='int32', name="u_doc_input")
u_embedded_seq = embedding_layer(u_input)
# shared first conv
gru_layer = GRU(units=64, name="gru_layer",
dropout=0.3, recurrent_dropout=0.3)
k_feat = gru_layer(k_embedded_seq)
u_feat = gru_layer(u_embedded_seq)
x = keras.layers.subtract([k_feat, u_feat])
# x = Dense(32, activation='relu')(x)
preds = Dense(1, activation='sigmoid')(x)
model = Model([k_input, u_input], preds)
model.compile(loss='binary_crossentropy',
optimizer='adadelta',
metrics=['acc'])
return model
def rnn_outer_product(data_builder: DataBuilder):
logging.info("BUILDING RNN USING OUTER PRODUCT")
embedding_layer = Embedding(input_length=data_builder.target_doc_len,
input_dim=data_builder.vocabulary_size + 1,
output_dim=100,
weights=[data_builder.embed_matrix],
trainable=False,
mask_zero=True,
name="embedding_layer")
k_input = Input(shape=(data_builder.target_doc_len,), dtype='int32', name="k_doc_input")
k_embedded_seq = embedding_layer(k_input)
u_input = Input(shape=(data_builder.target_doc_len,), dtype='int32', name="u_doc_input")
u_embedded_seq = embedding_layer(u_input)
# shared first conv
gru_layer = GRU(units=64, name="gru_layer", dropout=0.3, recurrent_dropout=0.3)
k_feat = gru_layer(k_embedded_seq)
u_feat = gru_layer(u_embedded_seq)
d_layer = Dense(8, activation='relu')
k_feat = d_layer(k_feat)
u_feat = d_layer(u_feat)
k_feat = keras.layers.Reshape([8, 1])(k_feat)
u_feat = keras.layers.Reshape([1, 8])(u_feat)
x = keras.layers.Multiply()([k_feat, u_feat])
x = Flatten()(x)
# x = keras.layers.subtract([k_feat, u_feat])
# x = Dense(32, activation='relu')(x)
preds = Dense(1, activation='sigmoid')(x)
model = Model([k_input, u_input], preds)
model.compile(loss='binary_crossentropy',
optimizer='adadelta',
metrics=['acc'])
return model
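# The Reshape([8, 1]) x Reshape([1, 8]) Multiply in rnn_outer_product above is a
# batched outer product: each of the 8 k-features interacts with each of the 8
# u-features, and Flatten hands the 64 interaction terms to the sigmoid head.
# A plain-NumPy sketch of the same computation (illustrative helper only, not
# used by the models above):
def _outer_product_sketch():
    k = np.arange(1, 9, dtype=float)            # stand-in for one k_feat row, shape (8,)
    u = np.arange(8, 0, -1, dtype=float)        # stand-in for one u_feat row, shape (8,)
    outer = k.reshape(8, 1) * u.reshape(1, 8)   # broadcasting == np.outer(k, u)
    assert np.allclose(outer, np.outer(k, u))
    return outer.reshape(-1)                    # shape (64,), what Flatten() produces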
def try_ml():
ml_data_builder = DataBuilderML400(embed_dim=100, vocab_size=20000,
target_doc_len=8000, target_sent_len=1024)
train_data = ml_data_builder.get_train_data()
val_data = ml_data_builder.get_val_data()
# save_path = Path("temp_model.h5")
# if save_path.exists():
# model = keras.models.load_model(save_path)
# else:
model = rnn_outer_product(ml_data_builder)
    # .as_matrix() was removed in pandas 1.0; .to_numpy() is the modern equivalent
    model.fit([np.stack(train_data.value["k_doc"].to_numpy()), np.stack(train_data.value["u_doc"].to_numpy())],
              train_data.label_doc,
              validation_data=(
                  [np.stack(val_data.value["k_doc"].to_numpy()),
                   np.stack(val_data.value["u_doc"].to_numpy())],
                  val_data.label_doc),
              epochs=4, batch_size=32)
test_data = ml_data_builder.get_test_data()
    loss, acc = model.evaluate(x=[np.stack(test_data.value["k_doc"].to_numpy()),
                                  np.stack(test_data.value["u_doc"].to_numpy())],
                               y=test_data.label_doc, batch_size=32)
logging.info("LOSS: " + str(loss))
logging.info("ACCU: " + str(acc))
def try_pan():
data_builder = DataBuilderPan(year="15", train_split="pan15_train", test_split="pan15_test",
embed_dim=100, vocab_size=5000, target_doc_len=600, target_sent_len=1024,
word_split=True)
train_data = data_builder.get_train_data()
test_data = data_builder.get_test_data()
model = rnn_concat(data_builder)
    input_x = [np.stack(train_data.value["k_doc"].to_numpy()), np.stack(train_data.value["u_doc"].to_numpy())]
    val_x = [np.stack(test_data.value["k_doc"].to_numpy()),
             np.stack(test_data.value["u_doc"].to_numpy())]
val_y = test_data.label_doc
# TRAIN \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
history = model.fit(
input_x,
train_data.label_doc,
epochs=8, batch_size=32,
callbacks=[roc_callback(training_data=(input_x, train_data.label_doc), validation_data=(val_x, val_y))]
, validation_data=(val_x, val_y)
)
    pred_output = model.predict(x=[np.stack(test_data.value["k_doc"].to_numpy()),
                                   np.stack(test_data.value["u_doc"].to_numpy())],
                                batch_size=32)
acc = accuracy_score(test_data.label_doc, np.rint(pred_output).astype(int))
logging.info("ACCU: " + str(acc))
roc_result = roc_auc_score(test_data.label_doc, pred_output)
logging.info("ROC: " + str(roc_result))
print(pred_output[:10])
pyplot.plot(history.history['loss'])
pyplot.plot(history.history['val_loss'])
pyplot.title('model train vs validation loss')
pyplot.ylabel('loss')
pyplot.xlabel('epoch')
pyplot.legend(['train', 'validation'], loc='upper right')
pyplot.show()
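    # NOTE: the two ROC plots below assume roc_callback also writes 'train_roc' /
    # 'val_roc' into the Keras logs dict each epoch; if the callback only prints,
    # history.history will not contain these keys and the plots will fail.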
pyplot.plot(history.history['train_roc'])
pyplot.plot(history.history['val_roc'])
pyplot.title('model train vs validation ROC AUC')
pyplot.ylabel('AUC')
pyplot.xlabel('epoch')
pyplot.legend(['train', 'validation'], loc='upper right')
pyplot.show()
pyplot.hist(pred_output, 20, (0.0, 1.0))
pyplot.show()
# get_k_gru_output = K.function(model.input,
# [model.get_layer("gru_layer").get_output_at(0),
# model.get_layer("gru_layer").get_output_at(1)])
# layer_output = get_k_gru_output([test_data.value["k_doc"][0].reshape([1,400]),
# test_data.value["u_doc"][0].reshape([1,400])] )
# print(layer_output)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
try_pan()
| 36.383803 | 113 | 0.61802 |
4a1ff3f1b5234065676cc5404ef7fa6e951183de | 1,397 | py | Python | day01/test/test_main.py | JoseTomasTocino/AdventOfCode2020 | 19b22c3f9ef2331f08c2ad78f49f200a5f4adfc9 | [
"MIT"
] | null | null | null | day01/test/test_main.py | JoseTomasTocino/AdventOfCode2020 | 19b22c3f9ef2331f08c2ad78f49f200a5f4adfc9 | [
"MIT"
] | null | null | null | day01/test/test_main.py | JoseTomasTocino/AdventOfCode2020 | 19b22c3f9ef2331f08c2ad78f49f200a5f4adfc9 | [
"MIT"
] | null | null | null | import logging
import os
from day01.code.main import process_input, get_number_group
logger = logging.getLogger(__name__)
local_path = os.path.abspath(os.path.dirname(__file__))
def test_case_1():
input_str = """1721
979
366
299
675
1456"""
assert process_input(input_str) == 514579
def test_case_2():
input_str = """1721
979
366
299
675
1456"""
assert process_input(input_str, num_components=3) == 241861950
def test_input():
with open(os.path.join(local_path, "input"), "r") as f:
input_str = f.read()
output = process_input(input_str)
logger.info(f"Output: {output}")
assert output == 388075
def test_input_with_three_components():
with open(os.path.join(local_path, "input"), "r") as f:
input_str = f.read()
output = process_input(input_str, num_components=3)
logger.info(f"Output: {output}")
assert output == 293450526
def test_get_number_group():
numbers_in = [1, 2, 3, 4]
numbers_out = list(get_number_group(numbers_in, num_components=1))
assert numbers_out == [[1], [2], [3], [4]]
numbers_out = list(get_number_group(numbers_in, num_components=2))
assert numbers_out == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
numbers_out = list(get_number_group(numbers_in, num_components=3))
assert numbers_out == [[1, 2, 3], [1, 2, 4], [1, 3, 4], [2, 3, 4]]
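# The assertions above pin down get_number_group as ordered combinations of the
# input numbers. A minimal reconstruction consistent with them (hypothetical --
# the real implementation lives in day01.code.main):
from itertools import combinations

def _reference_get_number_group(numbers, num_components):
    for combo in combinations(numbers, num_components):
        yield list(combo)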
| 22.532258 | 74 | 0.651396 |
4a1ff42d0271009f819271ad7a38e1f4ceb8e91c | 3,241 | py | Python | MCTS/MCS.py | hojunkim13/master2048 | 2ef6c4674197f8a38853195307b50b6e964f4284 | [
"MIT"
] | null | null | null | MCTS/MCS.py | hojunkim13/master2048 | 2ef6c4674197f8a38853195307b50b6e964f4284 | [
"MIT"
] | null | null | null | MCTS/MCS.py | hojunkim13/master2048 | 2ef6c4674197f8a38853195307b50b6e964f4284 | [
"MIT"
] | null | null | null | import os, sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import numpy as np
import random  # used by rollout(); not guaranteed to be re-exported by the wildcard import below
import time
from Environment.BitEnv import *
from Environment.BitEnv import _2048
from multiprocessing import Pool
class MCTS:
def __init__(self):
pass
def heuristic(self, grid):
"""
1. Decide (Up Down) or (Left Right) by sum of merged tiles
2. Decide direction by less moved tile
"""
values = [0, 0, 0, 0]
grids = []
differences = [0, 0, 0, 0]
for move in range(0, 4):
grid_, difference = moveForHeuristic(grid, move)
grids.append(grid_)
if not difference:
continue
values[move] = len(getFreeTile(grid_))
differences[move] = difference
max_value = max(values)
candidates = []
for move in range(4):
if values[move] == max_value:
candidates.append(move)
else:
differences[move] = 1e3
for c in candidates:
if differences[c] == min(differences):
heuristic_move = c
return grids[heuristic_move]
def rollout(self, first_move):
grid, _ = moveGrid(self.root_grid, first_move)
step = 0
done = False
while not done:
free_tiles = getFreeTile(grid)
if not free_tiles and not getLegalMoves(grid):
done = True
move = random.randint(0, 3)
grid, changed = moveGrid(grid, move)
if changed:
step += 1
# grid = self.heuristic(grid)
# step += 1
if step >= 100:
break
return step
def getAction(self, root_grid, n_sim):
self.root_grid = root_grid
legal_moves = getLegalMoves(root_grid)
values = {k: 0 for k in legal_moves}
visits = {k: 0 for k in legal_moves}
first_move_sequence = np.random.choice(legal_moves, size=n_sim)
with Pool(6) as p:
res = p.map(self.rollout, first_move_sequence)
for value, move in zip(res, first_move_sequence):
values[move] += value
visits[move] += 1
best_move = max(legal_moves, key=lambda x: values[x] / visits[x])
return best_move
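# getAction above is flat Monte-Carlo search: first moves are sampled uniformly
# and ranked by mean rollout length. A common refinement is UCB1 selection,
# sketched below as a hypothetical alternative (not wired into getAction):
def _ucb1_move(values, visits, total_visits, c=1.4):
    import math
    best_move, best_score = None, float("-inf")
    for move in values:
        if visits[move] == 0:
            return move  # explore untried moves first
        score = values[move] / visits[move] + c * math.sqrt(math.log(total_visits) / visits[move])
        if score > best_score:
            best_move, best_score = move, score
    return best_move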
def main(n_episode, n_sim):
mcts = MCTS()
score_list = []
for e in range(n_episode):
start_time = time.time()
done = False
score = 0
grid = env.reset()
while not done:
env.render()
action = mcts.getAction(grid, n_sim)
grid, reward, done, info = env.step(action)
score += reward
score_list.append(score)
average_score = np.mean(score_list[-100:])
spending_time = time.time() - start_time
max_tile = np.max(grid2Board(grid))
print(
f"Episode : {e+1} / {n_episode}, Score : {score}, Max Tile : {max_tile}, Average: {average_score:.1f}"
)
print(f"SPENDING TIME : {spending_time:.1f} Sec\n")
env.close()
if __name__ == "__main__":
env = _2048()
n_episode = 1
n_sim = 100
main(n_episode=n_episode, n_sim=n_sim)
| 29.733945 | 114 | 0.556001 |
4a1ff47dc7c0628e78cbf9e9df1c53cb13a43bbd | 631 | py | Python | utils/voice_analyser.py | roi3363/music-pytheory | 53cc3c4f0025acef75411ff85e1ce365912267be | [
"MIT"
] | 2 | 2019-09-24T09:41:37.000Z | 2020-07-26T20:21:05.000Z | utils/voice_analyser.py | roi3363/music-pytheory | 53cc3c4f0025acef75411ff85e1ce365912267be | [
"MIT"
] | null | null | null | utils/voice_analyser.py | roi3363/music-pytheory | 53cc3c4f0025acef75411ff85e1ce365912267be | [
"MIT"
] | null | null | null | # import os
# import struct
# import sys
# import time
#
# import pyaudio
# import numpy as np
# from matplotlib import pyplot as plt
# from matplotlib.animation import FuncAnimation
#
# CHUNK = 1024 * 4
# RATE = 44100
# FORMAT = pyaudio.paInt16
# CHANNELS = 1
#
# p = pyaudio.PyAudio()
#
# stream = p.open(
# format=FORMAT,
# rate=RATE,
# channels=1,
# frames_per_buffer=CHUNK,
# input=True
# )
#
#
# fig, ax = plt.subplots()
# line, = ax.plot(1, 100)
#
# while True:
# data = np.frombuffer(stream.read(CHUNK), dtype=np.int16)
# print('#' + '#' * int(np.average(data)))
#
| 12.877551 | 62 | 0.597464 |
4a1ff4e261ed68eb06f290a33a07b37453b27739 | 1,548 | py | Python | blending.py | RachitB11/dual-fisheye-video-stitching | 36eac03132531f3e3141a67f994997cccb78e5ef | [
"MIT"
] | null | null | null | blending.py | RachitB11/dual-fisheye-video-stitching | 36eac03132531f3e3141a67f994997cccb78e5ef | [
"MIT"
] | null | null | null | blending.py | RachitB11/dual-fisheye-video-stitching | 36eac03132531f3e3141a67f994997cccb78e5ef | [
"MIT"
] | null | null | null | #!/usr/bin/python
import numpy as np
import cv2
def GaussianPyramid(img, leveln):
GP = [img]
for i in range(leveln - 1):
GP.append(cv2.pyrDown(GP[i]))
return GP
def LaplacianPyramid(img, leveln):
LP = []
for i in range(leveln - 1):
next_img = cv2.pyrDown(img)
        # pass the target size via the dstsize keyword; positionally it would be taken as dst
        LP.append(img - cv2.pyrUp(next_img, dstsize=img.shape[1::-1]))
img = next_img
LP.append(img)
return LP
def blend_pyramid(LPA, LPB, MP):
blended = []
for i, M in enumerate(MP):
blended.append(LPA[i] * M + LPB[i] * (1.0 - M))
return blended
def reconstruct(LS):
img = LS[-1]
for lev_img in LS[-2::-1]:
        img = cv2.pyrUp(img, dstsize=lev_img.shape[1::-1])
img += lev_img
return img
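# A Laplacian pyramid is an exact decomposition, so reconstruct() inverts
# LaplacianPyramid() up to floating-point error -- a quick sanity check:
def _pyramid_roundtrip_ok(img, leveln=4):
    rebuilt = reconstruct(LaplacianPyramid(img.astype(np.float64), leveln))
    return np.allclose(rebuilt, img, atol=1e-6)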
def multi_band_blending(img1, img2, mask, leveln=6):
max_leveln = int(np.floor(np.log2(min(img1.shape[0], img1.shape[1],
img2.shape[0], img2.shape[1]))))
if leveln is None:
leveln = max_leveln
if leveln < 1 or leveln > max_leveln:
print("warning: inappropriate number of leveln")
leveln = max_leveln
# Get Gaussian pyramid and Laplacian pyramid
MP = GaussianPyramid(mask, leveln)
LPA = LaplacianPyramid(img1.astype(np.float64), leveln)
LPB = LaplacianPyramid(img2.astype(np.float64), leveln)
    # Blend the two Laplacian pyramids
blended = blend_pyramid(LPA, LPB, MP)
# Reconstruction process
result = reconstruct(blended)
result[result > 255] = 255
result[result < 0] = 0
return result
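# Minimal usage sketch (file names are placeholders; any two same-sized images work):
if __name__ == "__main__":
    img1 = cv2.imread("left.jpg").astype(np.float64)
    img2 = cv2.imread("right.jpg").astype(np.float64)
    mask = np.zeros_like(img1)
    mask[:, :img1.shape[1] // 2] = 1.0  # take the left half from img1, the right half from img2
    out = multi_band_blending(img1, img2, mask, leveln=6)
    cv2.imwrite("blended.jpg", out.astype(np.uint8))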
| 25.8 | 74 | 0.609173 |
4a1ff5046131d48c9a8d56c8878f771328daf972 | 6,940 | py | Python | accelbyte_py_sdk/api/dslogmanager/operations/terminated_servers/download_server_logs.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/dslogmanager/operations/terminated_servers/download_server_logs.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | accelbyte_py_sdk/api/dslogmanager/operations/terminated_servers/download_server_logs.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# justice-ds-log-manager-service (2.3.2)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ResponseError
class DownloadServerLogs(Operation):
"""Download dedicated server log files (downloadServerLogs)
Required permission: ADMIN:NAMESPACE:{namespace}:DSLM:LOG [READ]
Required scope: social
This endpoint will download dedicated server's log file (.log).
Required Permission(s):
- ADMIN:NAMESPACE:{namespace}:DSLM:LOG [READ]
Required Scope(s):
- social
Properties:
url: /dslogmanager/namespaces/{namespace}/servers/{podName}/logs/download
method: GET
tags: ["Terminated Servers"]
consumes: ["application/json"]
produces: ["application/json", "text/x-log"]
securities: [BEARER_AUTH]
namespace: (namespace) REQUIRED str in path
pod_name: (podName) REQUIRED str in path
Responses:
200: OK - (server logs downloaded.)
404: Not Found - ResponseError (Not Found)
500: Internal Server Error - ResponseError (Internal Server Error)
"""
# region fields
_url: str = "/dslogmanager/namespaces/{namespace}/servers/{podName}/logs/download"
_method: str = "GET"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json", "text/x-log"]
_securities: List[List[str]] = [["BEARER_AUTH"]]
_location_query: str = None
namespace: str # REQUIRED in [path]
pod_name: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"path": self.get_path_params(),
}
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "pod_name"):
result["podName"] = self.pod_name
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_namespace(self, value: str) -> DownloadServerLogs:
self.namespace = value
return self
def with_pod_name(self, value: str) -> DownloadServerLogs:
self.pod_name = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "pod_name") and self.pod_name:
result["podName"] = str(self.pod_name)
elif include_empty:
result["podName"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, HttpResponse], Union[None, HttpResponse, ResponseError]]:
"""Parse the given response.
200: OK - (server logs downloaded.)
404: Not Found - ResponseError (Not Found)
500: Internal Server Error - ResponseError (Internal Server Error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return HttpResponse.create(code, "OK"), None
if code == 404:
return None, ResponseError.create_from_dict(content)
if code == 500:
return None, ResponseError.create_from_dict(content)
return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
namespace: str,
pod_name: str,
) -> DownloadServerLogs:
instance = cls()
instance.namespace = namespace
instance.pod_name = pod_name
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> DownloadServerLogs:
instance = cls()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "podName" in dict_ and dict_["podName"] is not None:
instance.pod_name = str(dict_["podName"])
elif include_empty:
instance.pod_name = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"namespace": "namespace",
"podName": "pod_name",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"namespace": True,
"podName": True,
}
# endregion static methods
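# Minimal construction sketch (illustrative only; namespace/pod values are placeholders):
def _example_usage():
    op = DownloadServerLogs.create(namespace="my-namespace", pod_name="my-pod")
    assert op.get_path_params() == {"namespace": "my-namespace", "podName": "my-pod"}
    return op.to_dict()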
| 28.326531 | 151 | 0.628098 |
4a1ff5b45714e6068518d7e8c4c6462a36d5a3bf | 636 | py | Python | LISTS2/NUMBER9.py | MakarFadeev/PythonTasks | 2ae18c3c6a50808f985966d3304a6af6824ce686 | [
"Apache-2.0"
] | 2 | 2020-11-13T05:59:45.000Z | 2020-11-29T09:26:20.000Z | LISTS2/NUMBER9.py | MakarFadeev/PythonTasks | 2ae18c3c6a50808f985966d3304a6af6824ce686 | [
"Apache-2.0"
] | 1 | 2020-10-29T18:14:10.000Z | 2020-10-29T18:14:10.000Z | LISTS2/NUMBER9.py | MakarFadeev/PythonTasks | 2ae18c3c6a50808f985966d3304a6af6824ce686 | [
"Apache-2.0"
] | null | null | null | howMany = int(input('How many numbers do you want to enter? '))
array = []
better = float('inf')
betterLength = float('inf')
n = int(input('Enter the main number: '))
for i in range(0, howMany):
    array.append(int(input('Enter a number: ')))
if (array[i] > n):
if (array[i] - n < betterLength):
better = array[i]
betterLength = array[i] - n
if (array[i] < n):
if (n - array[i] < betterLength):
better = array[i]
betterLength = n - array[i]
if (array[i] == n):
better = array[i]
betterLength = n - array[i]
print('Closest number: ', better)
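# The loop above is a hand-rolled nearest-value search; an equivalent idiomatic
# one-liner (ties resolved by first occurrence) would be:
# better = min(array, key=lambda x: abs(x - n))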
| 28.909091 | 56 | 0.564465 |
4a1ff5ef55166327c66925bb0715d0566a66ef62 | 120 | py | Python | crawler/gather/pipelines/__init__.py | shifei123/test | 26611d8f63957a6e3899ecb56ede3e1bed260105 | [
"Apache-2.0"
] | 283 | 2016-04-07T15:51:01.000Z | 2022-02-17T15:50:25.000Z | crawler/gather/pipelines/__init__.py | taogeT/flask_livetv | 26611d8f63957a6e3899ecb56ede3e1bed260105 | [
"Apache-2.0"
] | 2 | 2017-03-15T07:56:49.000Z | 2018-09-20T05:56:23.000Z | crawler/gather/pipelines/__init__.py | taogeT/flask_livetv | 26611d8f63957a6e3899ecb56ede3e1bed260105 | [
"Apache-2.0"
] | 127 | 2016-04-15T14:40:45.000Z | 2021-04-07T07:57:02.000Z | # -*- coding: utf-8 -*-
from .database import CurrentPipeline, StatisticPipeline
from .harddisk import HardDiskPipeline
| 30 | 56 | 0.783333 |
4a1ff7bbd3375bdda5ee7647898aaf160ce9135c | 9,379 | py | Python | tests/test_pipeline.py | lucianolorenti/Temporis | 90004e28beb3d1bbec474c262f92ed042045e556 | [
"MIT"
] | null | null | null | tests/test_pipeline.py | lucianolorenti/Temporis | 90004e28beb3d1bbec474c262f92ed042045e556 | [
"MIT"
] | null | null | null | tests/test_pipeline.py | lucianolorenti/Temporis | 90004e28beb3d1bbec474c262f92ed042045e556 | [
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from scipy.stats import entropy
from temporis.dataset.ts_dataset import AbstractTimeSeriesDataset
from temporis.transformation import Concatenate as TransformationConcatenate
from temporis.transformation import Transformer
from temporis.transformation.features.imputers import PerColumnImputer
from temporis.transformation.features.outliers import IQROutlierRemover
from temporis.transformation.features.scalers import MinMaxScaler
from temporis.transformation.features.selection import ByNameFeatureSelector
from temporis.transformation.features.split import SplitByCategory
from temporis.transformation.features.transformation import MeanCentering
from temporis.transformation.functional.graph_utils import root_nodes
from temporis.transformation.functional.pipeline.utils import make_pipeline
from temporis.transformation.functional.concatenate import Concatenate
def gaussian(N: int, mean: float = 50, std: float = 10):
return np.random.randn(N) * std + mean
class MockDatasetCategorical(AbstractTimeSeriesDataset):
def build_df(self):
N = 50
return pd.DataFrame(
{
"Categorical": ["a"] * N + ["b"] * N,
"feature1": np.hstack(
(gaussian(N, self.mean_a_f1), gaussian(N, self.mean_b_f1))
),
"feature2": np.hstack(
(gaussian(N, self.mean_a_f2), gaussian(N, self.mean_b_f2))
),
}
)
def __init__(self, N:int = 5):
super().__init__()
self.mean_a_f1 = 50
self.mean_b_f1 = -16
self.mean_a_f2 = 90
self.mean_b_f2 = 250
self.lives = [self.build_df() for i in range(N)]
        life_4 = self.lives[4]
        life_4.loc[life_4.index[50], 'feature1'] = 591212
        life_4.loc[life_4.index[21], 'feature2'] = 591212
        life_3 = self.lives[3]
        # index into life_3, not life_4 (both happen to share the same RangeIndex)
        life_3.loc[life_3.index[88], 'feature1'] = 591212
        life_3.loc[life_3.index[25], 'feature2'] = 591212
def get_time_series(self, i: int):
return self.lives[i]
@property
def rul_column(self):
return "RUL"
@property
def n_time_series(self):
return len(self.lives)
class MockDataset(AbstractTimeSeriesDataset):
def __init__(self):
super().__init__()
self.lives = [
pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 4, 6, 8], "RUL": [4, 3, 2, 1]}),
pd.DataFrame(
{"a": [150, 5, 14, 24], "b": [-52, -14, -36, 8], "RUL": [4, 3, 2, 1]}
),
]
def get_time_series(self, i: int):
return self.lives[i]
@property
def rul_column(self):
return "RUL"
@property
def n_time_series(self):
return len(self.lives)
class MockDataset1(AbstractTimeSeriesDataset):
def __init__(self):
super().__init__()
self.lives = [
pd.DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4], "RUL": [4, 3, 2, 1]}),
pd.DataFrame({"a": [2, 4, 6, 8], "b": [2, 4, 6, 8], "RUL": [4, 3, 2, 1]}),
]
def get_time_series(self, i: int):
return self.lives[i]
@property
def rul_column(self):
return "RUL"
@property
def n_time_series(self):
return len(self.lives)
class MockDataset2(AbstractTimeSeriesDataset):
def __init__(self, n:int = 5):
super().__init__()
self.lives = [
pd.DataFrame({"a": ['A', 'A', 'A', 'A'], "b": [1, 2, 3, 4], "RUL": [4, 3, 2, 1]})
for i in range(n)
]
def get_time_series(self, i: int):
return self.lives[i]
@property
def rul_column(self):
return "RUL"
@property
def n_time_series(self):
return len(self.lives)
class TestPipeline:
def test_FitOrder(self):
dataset = MockDataset()
pipe = ByNameFeatureSelector(["a", "b"])
pipe = MeanCentering()(pipe)
pipe = MinMaxScaler((-1, 1), name="Scaler")(pipe)
target_pipe = ByNameFeatureSelector(["RUL"])
test_transformer = Transformer(transformerX=pipe, transformerY=target_pipe)
test_transformer.fit(dataset)
X, y, sw = test_transformer.transform(dataset[0])
assert X.shape[1] == 2
df_dataset = dataset.to_pandas()
centered_df = df_dataset[["a", "b"]] - df_dataset[["a", "b"]].mean()
scaler = test_transformer.transformerX.find_node("Scaler")
assert scaler.data_min.equals(centered_df.min(axis=0))
assert scaler.data_max.equals(centered_df.max(axis=0))
def test_FitOrder2(self):
dataset = MockDataset()
pipe_a = ByNameFeatureSelector(["a"])
pipe_a = MeanCentering()(pipe_a)
scaler_a = MinMaxScaler((-1, 1), name="a")
pipe_a = scaler_a(pipe_a)
pipe_b = ByNameFeatureSelector(["b"])
pipe_b = MeanCentering()(pipe_b)
scaler_b = MinMaxScaler((-1, 1), name="b")
pipe_b = scaler_b(pipe_b)
pipe = TransformationConcatenate()([pipe_a, pipe_b])
target_pipe = ByNameFeatureSelector(["RUL"])
test_transformer = Transformer(transformerX=pipe, transformerY=target_pipe)
test_transformer.fit(dataset)
X, y, sw = test_transformer.transform(dataset[0])
assert X.shape[1] == 2
df_dataset = dataset.to_pandas()
centered_df = df_dataset[["a", "b"]] - df_dataset[["a", "b"]].mean()
assert scaler_a.data_min.equals(centered_df.min(axis=0)[["a"]])
assert scaler_b.data_max.equals(centered_df.max(axis=0)[["b"]])
def test_PandasConcatenate(self):
dataset = MockDataset1()
pipe = ByNameFeatureSelector(["a"])
pipe = MinMaxScaler((-1, 1))(pipe)
pipe2 = ByNameFeatureSelector(["b"])
pipe2 = MinMaxScaler((-5, 0))(pipe2)
pipe = TransformationConcatenate()([pipe, pipe2])
pipe = MeanCentering()(pipe)
target_pipe = ByNameFeatureSelector(["RUL"])
test_transformer = Transformer(transformerX=pipe, transformerY=target_pipe)
test_transformer.fit(dataset)
df = dataset.to_pandas()[["a", "b"]]
data_min = df.min()
data_max = df.max()
gt = (df - data_min) / (data_max - data_min)
gt["a"] = gt["a"] * (1 - (-1)) + (-1)
gt["b"] = gt["b"] * (0 - (-5)) + (-5)
gt = gt - gt.mean()
X, y, sw = test_transformer.transform(dataset[0])
assert (np.mean((gt.iloc[:4, :].values - X.values) ** 2)) < 0.0001
X, y, sw = test_transformer.transform(dataset[1])
assert (np.mean((gt.iloc[4:, :].values - X.values) ** 2)) < 0.0001
assert X.shape[1] == 2
def test_subpipeline(self):
dataset = MockDatasetCategorical()
pipe = ByNameFeatureSelector(["Categorical", "feature1", "feature2"])
bb = make_pipeline(
IQROutlierRemover(lower_quantile=0.05, upper_quantile=0.95, clip=True),
MinMaxScaler((-1, 1)),
PerColumnImputer(),
)
pipe = SplitByCategory("Categorical", bb)(pipe)
target_pipe = ByNameFeatureSelector(["RUL"])
test_transformer = Transformer(transformerX=pipe)
test_transformer.fit(dataset)
q = np.hstack([d[d['Categorical'] == 'a']['feature1'] for d in dataset])
approx_cat_a_feature1_1_quantile = np.quantile(q, 0.05)
approx_cat_a_feature1_3_quantile = np.quantile(q, 0.95)
r = root_nodes(pipe)[0]
IQR_Node = r.next[0].next[1].next[0]
real_cat_a_feature1_1_quantile = IQR_Node.Q1['feature1']
real_cat_a_feature1_3_quantile = IQR_Node.Q3['feature1']
assert approx_cat_a_feature1_1_quantile - real_cat_a_feature1_1_quantile < 5
assert approx_cat_a_feature1_3_quantile - real_cat_a_feature1_3_quantile < 5
        # compare the absolute deviation: the bare `x - 1 < 0.01` was vacuously true for any x <= 1
        assert abs(test_transformer.transform(dataset[4])[0]['feature1'].iloc[50] - 1) < 0.01
        assert abs(test_transformer.transform(dataset[4])[0]['feature2'].iloc[21] - 1) < 0.01
d = dataset[4]
aa = d[d['Categorical'] == 'a']['feature1']
counts_before_transformation, _ = np.histogram(aa)
counts_before_transformation = counts_before_transformation / np.sum(counts_before_transformation)
bb = test_transformer.transform(dataset[4])[0]['feature1']
counts_after_transformation, _ = np.histogram(bb[:50])
counts_after_transformation = counts_after_transformation / np.sum(counts_after_transformation)
assert entropy(counts_before_transformation, counts_after_transformation) < 0.01
def test_split_one_category(self):
dataset_orig = MockDataset2()
dataset = dataset_orig[0:5]
pipe = ByNameFeatureSelector(["a", "b"])
scaler_pipe = make_pipeline(MinMaxScaler((-1, 1), name="Scaler"))
pipe = SplitByCategory('a', scaler_pipe, add_default=False)(pipe)
pipe = MeanCentering()(pipe)
pipe = MinMaxScaler((-1, 1), name="Scaler2")(pipe)
target_pipe = ByNameFeatureSelector(["RUL"])
test_transformer = Transformer(transformerX=pipe, transformerY=target_pipe)
test_transformer.fit(dataset)
X, y, sw = test_transformer.transform(dataset[0])
assert X.shape[1] == 1
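# SplitByCategory, exercised above, boils down to fitting one copy of the
# sub-pipeline per category value and routing rows by that column. A plain-pandas
# sketch of the routing step (hypothetical, not Temporis internals):
def _split_apply(df, column, fn):
    return pd.concat([fn(group) for _, group in df.groupby(column)]).sort_index()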
| 32.230241 | 106 | 0.611686 |
4a1ff89d74cc656266e22d473f036b397e0105a4 | 37,103 | py | Python | tests/unit_tests/sql_parse_tests.py | navono/superset | 2daa07163326b8555488dab523c5479cf92821cf | [
"Apache-2.0"
] | 1 | 2022-02-10T11:30:05.000Z | 2022-02-10T11:30:05.000Z | tests/unit_tests/sql_parse_tests.py | navono/superset | 2daa07163326b8555488dab523c5479cf92821cf | [
"Apache-2.0"
] | 10 | 2022-01-05T01:31:07.000Z | 2022-03-16T01:09:46.000Z | tests/unit_tests/sql_parse_tests.py | navono/superset | 2daa07163326b8555488dab523c5479cf92821cf | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, redefined-outer-name, unused-argument, protected-access, too-many-lines
import unittest
from typing import Optional, Set
import pytest
import sqlparse
from pytest_mock import MockerFixture
from sqlalchemy import text
from sqlparse.sql import Identifier, Token, TokenList
from sqlparse.tokens import Name
from superset.exceptions import QueryClauseValidationException
from superset.sql_parse import (
add_table_name,
get_rls_for_table,
has_table_query,
insert_rls,
ParsedQuery,
sanitize_clause,
strip_comments_from_sql,
Table,
)
def extract_tables(query: str) -> Set[Table]:
"""
Helper function to extract tables referenced in a query.
"""
return ParsedQuery(query).tables
def test_table() -> None:
"""
Test the ``Table`` class and its string conversion.
Special characters in the table, schema, or catalog name should be escaped correctly.
"""
assert str(Table("tbname")) == "tbname"
assert str(Table("tbname", "schemaname")) == "schemaname.tbname"
assert (
str(Table("tbname", "schemaname", "catalogname"))
== "catalogname.schemaname.tbname"
)
assert (
str(Table("table.name", "schema/name", "catalog\nname"))
== "catalog%0Aname.schema%2Fname.table%2Ename"
)
def test_extract_tables() -> None:
"""
Test that referenced tables are parsed correctly from the SQL.
"""
assert extract_tables("SELECT * FROM tbname") == {Table("tbname")}
assert extract_tables("SELECT * FROM tbname foo") == {Table("tbname")}
assert extract_tables("SELECT * FROM tbname AS foo") == {Table("tbname")}
# underscore
assert extract_tables("SELECT * FROM tb_name") == {Table("tb_name")}
# quotes
assert extract_tables('SELECT * FROM "tbname"') == {Table("tbname")}
# unicode
assert extract_tables('SELECT * FROM "tb_name" WHERE city = "Lübeck"') == {
Table("tb_name")
}
# columns
assert extract_tables("SELECT field1, field2 FROM tb_name") == {Table("tb_name")}
assert extract_tables("SELECT t1.f1, t2.f2 FROM t1, t2") == {
Table("t1"),
Table("t2"),
}
# named table
assert extract_tables("SELECT a.date, a.field FROM left_table a LIMIT 10") == {
Table("left_table")
}
# reverse select
assert extract_tables("FROM t1 SELECT field") == {Table("t1")}
def test_extract_tables_subselect() -> None:
"""
Test that tables inside subselects are parsed correctly.
"""
assert (
extract_tables(
"""
SELECT sub.*
FROM (
SELECT *
FROM s1.t1
WHERE day_of_week = 'Friday'
) sub, s2.t2
WHERE sub.resolution = 'NONE'
"""
)
== {Table("t1", "s1"), Table("t2", "s2")}
)
assert (
extract_tables(
"""
SELECT sub.*
FROM (
SELECT *
FROM s1.t1
WHERE day_of_week = 'Friday'
) sub
WHERE sub.resolution = 'NONE'
"""
)
== {Table("t1", "s1")}
)
assert (
extract_tables(
"""
SELECT * FROM t1
WHERE s11 > ANY (
SELECT COUNT(*) /* no hint */ FROM t2
WHERE NOT EXISTS (
SELECT * FROM t3
WHERE ROW(5*t2.s1,77)=(
SELECT 50,11*s1 FROM t4
)
)
)
"""
)
== {Table("t1"), Table("t2"), Table("t3"), Table("t4")}
)
def test_extract_tables_select_in_expression() -> None:
"""
Test that parser works with ``SELECT``s used as expressions.
"""
assert extract_tables("SELECT f1, (SELECT count(1) FROM t2) FROM t1") == {
Table("t1"),
Table("t2"),
}
assert extract_tables("SELECT f1, (SELECT count(1) FROM t2) as f2 FROM t1") == {
Table("t1"),
Table("t2"),
}
def test_extract_tables_parenthesis() -> None:
"""
Test that parenthesis are parsed correctly.
"""
assert extract_tables("SELECT f1, (x + y) AS f2 FROM t1") == {Table("t1")}
def test_extract_tables_with_schema() -> None:
"""
Test that schemas are parsed correctly.
"""
assert extract_tables("SELECT * FROM schemaname.tbname") == {
Table("tbname", "schemaname")
}
assert extract_tables('SELECT * FROM "schemaname"."tbname"') == {
Table("tbname", "schemaname")
}
assert extract_tables('SELECT * FROM "schemaname"."tbname" foo') == {
Table("tbname", "schemaname")
}
assert extract_tables('SELECT * FROM "schemaname"."tbname" AS foo') == {
Table("tbname", "schemaname")
}
def test_extract_tables_union() -> None:
"""
Test that ``UNION`` queries work as expected.
"""
assert extract_tables("SELECT * FROM t1 UNION SELECT * FROM t2") == {
Table("t1"),
Table("t2"),
}
assert extract_tables("SELECT * FROM t1 UNION ALL SELECT * FROM t2") == {
Table("t1"),
Table("t2"),
}
assert extract_tables("SELECT * FROM t1 INTERSECT ALL SELECT * FROM t2") == {
Table("t1"),
Table("t2"),
}
def test_extract_tables_select_from_values() -> None:
"""
Test that selecting from values returns no tables.
"""
assert extract_tables("SELECT * FROM VALUES (13, 42)") == set()
def test_extract_tables_select_array() -> None:
"""
Test that queries selecting arrays work as expected.
"""
assert (
extract_tables(
"""
SELECT ARRAY[1, 2, 3] AS my_array
FROM t1 LIMIT 10
"""
)
== {Table("t1")}
)
def test_extract_tables_select_if() -> None:
"""
Test that queries with an ``IF`` work as expected.
"""
assert (
extract_tables(
"""
SELECT IF(CARDINALITY(my_array) >= 3, my_array[3], NULL)
FROM t1 LIMIT 10
"""
)
== {Table("t1")}
)
def test_extract_tables_with_catalog() -> None:
"""
Test that catalogs are parsed correctly.
"""
assert extract_tables("SELECT * FROM catalogname.schemaname.tbname") == {
Table("tbname", "schemaname", "catalogname")
}
def test_extract_tables_illdefined() -> None:
"""
Test that ill-defined tables return an empty set.
"""
assert extract_tables("SELECT * FROM schemaname.") == set()
assert extract_tables("SELECT * FROM catalogname.schemaname.") == set()
assert extract_tables("SELECT * FROM catalogname..") == set()
assert extract_tables("SELECT * FROM catalogname..tbname") == set()
@unittest.skip("Requires sqlparse>=3.1")
def test_extract_tables_show_tables_from() -> None:
"""
Test ``SHOW TABLES FROM``.
This is currently broken in the pinned version of sqlparse, and fixed in
``sqlparse>=3.1``. However, ``sqlparse==3.1`` breaks some sql formatting.
"""
assert extract_tables("SHOW TABLES FROM s1 like '%order%'") == set()
def test_extract_tables_show_columns_from() -> None:
"""
Test ``SHOW COLUMNS FROM``.
"""
assert extract_tables("SHOW COLUMNS FROM t1") == {Table("t1")}
def test_extract_tables_where_subquery() -> None:
"""
Test that tables in a ``WHERE`` subquery are parsed correctly.
"""
assert (
extract_tables(
"""
SELECT name
FROM t1
WHERE regionkey = (SELECT max(regionkey) FROM t2)
"""
)
== {Table("t1"), Table("t2")}
)
assert (
extract_tables(
"""
SELECT name
FROM t1
WHERE regionkey IN (SELECT regionkey FROM t2)
"""
)
== {Table("t1"), Table("t2")}
)
assert (
extract_tables(
"""
SELECT name
FROM t1
WHERE regionkey EXISTS (SELECT regionkey FROM t2)
"""
)
== {Table("t1"), Table("t2")}
)
def test_extract_tables_describe() -> None:
"""
Test ``DESCRIBE``.
"""
assert extract_tables("DESCRIBE t1") == {Table("t1")}
def test_extract_tables_show_partitions() -> None:
"""
Test ``SHOW PARTITIONS``.
"""
assert (
extract_tables(
"""
SHOW PARTITIONS FROM orders
WHERE ds >= '2013-01-01' ORDER BY ds DESC
"""
)
== {Table("orders")}
)
def test_extract_tables_join() -> None:
"""
Test joins.
"""
assert extract_tables("SELECT t1.*, t2.* FROM t1 JOIN t2 ON t1.a = t2.a;") == {
Table("t1"),
Table("t2"),
}
assert (
extract_tables(
"""
SELECT a.date, b.name
FROM left_table a
JOIN (
SELECT
CAST((b.year) as VARCHAR) date,
name
FROM right_table
) b
ON a.date = b.date
"""
)
== {Table("left_table"), Table("right_table")}
)
assert (
extract_tables(
"""
SELECT a.date, b.name
FROM left_table a
LEFT INNER JOIN (
SELECT
CAST((b.year) as VARCHAR) date,
name
FROM right_table
) b
ON a.date = b.date
"""
)
== {Table("left_table"), Table("right_table")}
)
assert (
extract_tables(
"""
SELECT a.date, b.name
FROM left_table a
RIGHT OUTER JOIN (
SELECT
CAST((b.year) as VARCHAR) date,
name
FROM right_table
) b
ON a.date = b.date
"""
)
== {Table("left_table"), Table("right_table")}
)
assert (
extract_tables(
"""
SELECT a.date, b.name
FROM left_table a
FULL OUTER JOIN (
SELECT
CAST((b.year) as VARCHAR) date,
name
FROM right_table
) b
ON a.date = b.date
"""
)
== {Table("left_table"), Table("right_table")}
)
def test_extract_tables_semi_join() -> None:
"""
Test ``LEFT SEMI JOIN``.
"""
assert (
extract_tables(
"""
SELECT a.date, b.name
FROM left_table a
LEFT SEMI JOIN (
SELECT
CAST((b.year) as VARCHAR) date,
name
FROM right_table
) b
ON a.data = b.date
"""
)
== {Table("left_table"), Table("right_table")}
)
def test_extract_tables_combinations() -> None:
"""
Test a complex case with nested queries.
"""
assert (
extract_tables(
"""
SELECT * FROM t1
WHERE s11 > ANY (
SELECT * FROM t1 UNION ALL SELECT * FROM (
SELECT t6.*, t3.* FROM t6 JOIN t3 ON t6.a = t3.a
) tmp_join
WHERE NOT EXISTS (
SELECT * FROM t3
WHERE ROW(5*t3.s1,77)=(
SELECT 50,11*s1 FROM t4
)
)
)
"""
)
== {Table("t1"), Table("t3"), Table("t4"), Table("t6")}
)
assert (
extract_tables(
"""
SELECT * FROM (
SELECT * FROM (
SELECT * FROM (
SELECT * FROM EmployeeS
) AS S1
) AS S2
) AS S3
"""
)
== {Table("EmployeeS")}
)
def test_extract_tables_with() -> None:
"""
Test ``WITH``.
"""
assert (
extract_tables(
"""
WITH
x AS (SELECT a FROM t1),
y AS (SELECT a AS b FROM t2),
z AS (SELECT b AS c FROM t3)
SELECT c FROM z
"""
)
== {Table("t1"), Table("t2"), Table("t3")}
)
assert (
extract_tables(
"""
WITH
x AS (SELECT a FROM t1),
y AS (SELECT a AS b FROM x),
z AS (SELECT b AS c FROM y)
SELECT c FROM z
"""
)
== {Table("t1")}
)
def test_extract_tables_reusing_aliases() -> None:
"""
Test that the parser follows aliases.
"""
assert (
extract_tables(
"""
with q1 as ( select key from q2 where key = '5'),
q2 as ( select key from src where key = '5')
select * from (select key from q1) a
"""
)
== {Table("src")}
)
def test_extract_tables_multistatement() -> None:
"""
Test that the parser works with multiple statements.
"""
assert extract_tables("SELECT * FROM t1; SELECT * FROM t2") == {
Table("t1"),
Table("t2"),
}
assert extract_tables("SELECT * FROM t1; SELECT * FROM t2;") == {
Table("t1"),
Table("t2"),
}
def test_extract_tables_complex() -> None:
"""
Test a few complex queries.
"""
assert (
extract_tables(
"""
SELECT sum(m_examples) AS "sum__m_example"
FROM (
SELECT
COUNT(DISTINCT id_userid) AS m_examples,
some_more_info
FROM my_b_table b
JOIN my_t_table t ON b.ds=t.ds
JOIN my_l_table l ON b.uid=l.uid
WHERE
b.rid IN (
SELECT other_col
FROM inner_table
)
AND l.bla IN ('x', 'y')
GROUP BY 2
ORDER BY 2 ASC
) AS "meh"
ORDER BY "sum__m_example" DESC
LIMIT 10;
"""
)
== {
Table("my_l_table"),
Table("my_b_table"),
Table("my_t_table"),
Table("inner_table"),
}
)
assert (
extract_tables(
"""
SELECT *
FROM table_a AS a, table_b AS b, table_c as c
WHERE a.id = b.id and b.id = c.id
"""
)
== {Table("table_a"), Table("table_b"), Table("table_c")}
)
assert (
extract_tables(
"""
SELECT somecol AS somecol
FROM (
WITH bla AS (
SELECT col_a
FROM a
WHERE
1=1
AND column_of_choice NOT IN (
SELECT interesting_col
FROM b
)
),
rb AS (
SELECT yet_another_column
FROM (
SELECT a
FROM c
GROUP BY the_other_col
) not_table
LEFT JOIN bla foo
ON foo.prop = not_table.bad_col0
WHERE 1=1
GROUP BY
not_table.bad_col1 ,
not_table.bad_col2 ,
ORDER BY not_table.bad_col_3 DESC ,
not_table.bad_col4 ,
not_table.bad_col5
)
SELECT random_col
FROM d
WHERE 1=1
UNION ALL SELECT even_more_cols
FROM e
WHERE 1=1
UNION ALL SELECT lets_go_deeper
FROM f
WHERE 1=1
WHERE 2=2
GROUP BY last_col
LIMIT 50000
)
"""
)
== {Table("a"), Table("b"), Table("c"), Table("d"), Table("e"), Table("f")}
)
def test_extract_tables_mixed_from_clause() -> None:
"""
Test that the parser handles a ``FROM`` clause with table and subselect.
"""
assert (
extract_tables(
"""
SELECT *
FROM table_a AS a, (select * from table_b) AS b, table_c as c
WHERE a.id = b.id and b.id = c.id
"""
)
== {Table("table_a"), Table("table_b"), Table("table_c")}
)
def test_extract_tables_nested_select() -> None:
"""
Test that the parser handles selects inside functions.
"""
assert (
extract_tables(
"""
select (extractvalue(1,concat(0x7e,(select GROUP_CONCAT(TABLE_NAME)
from INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_SCHEMA like "%bi%"),0x7e)));
"""
)
== {Table("COLUMNS", "INFORMATION_SCHEMA")}
)
assert (
extract_tables(
"""
select (extractvalue(1,concat(0x7e,(select GROUP_CONCAT(COLUMN_NAME)
from INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME="bi_achivement_daily"),0x7e)));
"""
)
== {Table("COLUMNS", "INFORMATION_SCHEMA")}
)
def test_extract_tables_complex_cte_with_prefix() -> None:
"""
Test that the parser handles CTEs with prefixes.
"""
assert (
extract_tables(
"""
WITH CTE__test (SalesPersonID, SalesOrderID, SalesYear)
AS (
SELECT SalesPersonID, SalesOrderID, YEAR(OrderDate) AS SalesYear
FROM SalesOrderHeader
WHERE SalesPersonID IS NOT NULL
)
SELECT SalesPersonID, COUNT(SalesOrderID) AS TotalSales, SalesYear
FROM CTE__test
GROUP BY SalesYear, SalesPersonID
ORDER BY SalesPersonID, SalesYear;
"""
)
== {Table("SalesOrderHeader")}
)
def test_extract_tables_identifier_list_with_keyword_as_alias() -> None:
"""
Test that aliases that are keywords are parsed correctly.
"""
assert (
extract_tables(
"""
WITH
f AS (SELECT * FROM foo),
match AS (SELECT * FROM f)
SELECT * FROM match
"""
)
== {Table("foo")}
)
def test_update() -> None:
"""
Test that ``UPDATE`` is not detected as ``SELECT``.
"""
assert ParsedQuery("UPDATE t1 SET col1 = NULL").is_select() is False
def test_set() -> None:
"""
Test that ``SET`` is detected correctly.
"""
query = ParsedQuery(
"""
-- comment
SET hivevar:desc='Legislators';
"""
)
assert query.is_set() is True
assert query.is_select() is False
assert ParsedQuery("set hivevar:desc='bla'").is_set() is True
assert ParsedQuery("SELECT 1").is_set() is False
def test_show() -> None:
"""
Test that ``SHOW`` is detected correctly.
"""
query = ParsedQuery(
"""
-- comment
SHOW LOCKS test EXTENDED;
-- comment
"""
)
assert query.is_show() is True
assert query.is_select() is False
assert ParsedQuery("SHOW TABLES").is_show() is True
assert ParsedQuery("shOw TABLES").is_show() is True
assert ParsedQuery("show TABLES").is_show() is True
assert ParsedQuery("SELECT 1").is_show() is False
def test_is_explain() -> None:
"""
Test that ``EXPLAIN`` is detected correctly.
"""
assert ParsedQuery("EXPLAIN SELECT 1").is_explain() is True
assert ParsedQuery("EXPLAIN SELECT 1").is_select() is False
assert (
ParsedQuery(
"""
-- comment
EXPLAIN select * from table
-- comment 2
"""
).is_explain()
is True
)
assert (
ParsedQuery(
"""
-- comment
EXPLAIN select * from table
where col1 = 'something'
-- comment 2
-- comment 3
EXPLAIN select * from table
where col1 = 'something'
-- comment 4
"""
).is_explain()
is True
)
assert (
ParsedQuery(
"""
-- This is a comment
-- this is another comment but with a space in the front
EXPLAIN SELECT * FROM TABLE
"""
).is_explain()
is True
)
assert (
ParsedQuery(
"""
/* This is a comment
with stars instead */
EXPLAIN SELECT * FROM TABLE
"""
).is_explain()
is True
)
assert (
ParsedQuery(
"""
-- comment
select * from table
where col1 = 'something'
-- comment 2
"""
).is_explain()
is False
)
def test_is_valid_ctas() -> None:
"""
Test if a query is a valid CTAS.
A valid CTAS has a ``SELECT`` as its last statement.
"""
assert (
ParsedQuery("SELECT * FROM table", strip_comments=True).is_valid_ctas() is True
)
assert (
ParsedQuery(
"""
-- comment
SELECT * FROM table
-- comment 2
""",
strip_comments=True,
).is_valid_ctas()
is True
)
assert (
ParsedQuery(
"""
-- comment
SET @value = 42;
SELECT @value as foo;
-- comment 2
""",
strip_comments=True,
).is_valid_ctas()
is True
)
assert (
ParsedQuery(
"""
-- comment
EXPLAIN SELECT * FROM table
-- comment 2
""",
strip_comments=True,
).is_valid_ctas()
is False
)
assert (
ParsedQuery(
"""
SELECT * FROM table;
INSERT INTO TABLE (foo) VALUES (42);
""",
strip_comments=True,
).is_valid_ctas()
is False
)
def test_is_valid_cvas() -> None:
"""
Test if a query is a valid CVAS.
A valid CVAS has a single ``SELECT`` statement.
"""
assert (
ParsedQuery("SELECT * FROM table", strip_comments=True).is_valid_cvas() is True
)
assert (
ParsedQuery(
"""
-- comment
SELECT * FROM table
-- comment 2
""",
strip_comments=True,
).is_valid_cvas()
is True
)
assert (
ParsedQuery(
"""
-- comment
SET @value = 42;
SELECT @value as foo;
-- comment 2
""",
strip_comments=True,
).is_valid_cvas()
is False
)
assert (
ParsedQuery(
"""
-- comment
EXPLAIN SELECT * FROM table
-- comment 2
""",
strip_comments=True,
).is_valid_cvas()
is False
)
assert (
ParsedQuery(
"""
SELECT * FROM table;
INSERT INTO TABLE (foo) VALUES (42);
""",
strip_comments=True,
).is_valid_cvas()
is False
)
def test_is_select_cte_with_comments() -> None:
"""
Some CTES with comments are not correctly identified as SELECTS.
"""
sql = ParsedQuery(
"""WITH blah AS
(SELECT * FROM core_dev.manager_team),
blah2 AS
(SELECT * FROM core_dev.manager_workspace)
SELECT * FROM blah
INNER JOIN blah2 ON blah2.team_id = blah.team_id"""
)
assert sql.is_select()
sql = ParsedQuery(
"""WITH blah AS
/*blahblahbalh*/
(SELECT * FROM core_dev.manager_team),
--blahblahbalh
blah2 AS
(SELECT * FROM core_dev.manager_workspace)
SELECT * FROM blah
INNER JOIN blah2 ON blah2.team_id = blah.team_id"""
)
assert sql.is_select()
def test_cte_is_select() -> None:
"""
Some CTEs are not correctly identified as SELECTS.
"""
# `AS(` gets parsed as a function
sql = ParsedQuery(
"""WITH foo AS(
SELECT
FLOOR(__time TO WEEK) AS "week",
name,
COUNT(DISTINCT user_id) AS "unique_users"
FROM "druid"."my_table"
GROUP BY 1,2
)
SELECT
f.week,
f.name,
f.unique_users
FROM foo f"""
)
assert sql.is_select()
def test_unknown_select() -> None:
"""
Test that `is_select` works when sqlparse fails to identify the type.
"""
sql = "WITH foo AS(SELECT 1) SELECT 1"
assert sqlparse.parse(sql)[0].get_type() == "UNKNOWN"
assert ParsedQuery(sql).is_select()
sql = "WITH foo AS(SELECT 1) INSERT INTO my_table (a) VALUES (1)"
assert sqlparse.parse(sql)[0].get_type() == "UNKNOWN"
assert not ParsedQuery(sql).is_select()
sql = "WITH foo AS(SELECT 1) DELETE FROM my_table"
assert sqlparse.parse(sql)[0].get_type() == "UNKNOWN"
assert not ParsedQuery(sql).is_select()
def test_get_query_with_new_limit_comment() -> None:
"""
Test that limit is applied correctly.
"""
query = ParsedQuery("SELECT * FROM birth_names -- SOME COMMENT")
assert query.set_or_update_query_limit(1000) == (
"SELECT * FROM birth_names -- SOME COMMENT\nLIMIT 1000"
)
def test_get_query_with_new_limit_comment_with_limit() -> None:
"""
Test that limits in comments are ignored.
"""
query = ParsedQuery("SELECT * FROM birth_names -- SOME COMMENT WITH LIMIT 555")
assert query.set_or_update_query_limit(1000) == (
"SELECT * FROM birth_names -- SOME COMMENT WITH LIMIT 555\nLIMIT 1000"
)
def test_get_query_with_new_limit_lower() -> None:
"""
Test that lower limits are not replaced.
"""
query = ParsedQuery("SELECT * FROM birth_names LIMIT 555")
assert query.set_or_update_query_limit(1000) == (
"SELECT * FROM birth_names LIMIT 555"
)
def test_get_query_with_new_limit_upper() -> None:
"""
Test that higher limits are replaced.
"""
query = ParsedQuery("SELECT * FROM birth_names LIMIT 2000")
assert query.set_or_update_query_limit(1000) == (
"SELECT * FROM birth_names LIMIT 1000"
)
def test_basic_breakdown_statements() -> None:
"""
Test that multiple statements are parsed correctly.
"""
query = ParsedQuery(
"""
SELECT * FROM birth_names;
SELECT * FROM birth_names LIMIT 1;
"""
)
assert query.get_statements() == [
"SELECT * FROM birth_names",
"SELECT * FROM birth_names LIMIT 1",
]
def test_messy_breakdown_statements() -> None:
"""
Test the messy multiple statements are parsed correctly.
"""
query = ParsedQuery(
"""
SELECT 1;\t\n\n\n \t
\t\nSELECT 2;
SELECT * FROM birth_names;;;
SELECT * FROM birth_names LIMIT 1
"""
)
assert query.get_statements() == [
"SELECT 1",
"SELECT 2",
"SELECT * FROM birth_names",
"SELECT * FROM birth_names LIMIT 1",
]
def test_sqlparse_formatting() -> None:
"""
Test that ``from_unixtime`` is formatted correctly.
``sqlparse==0.3.1`` has a bug and removes space between ``from`` and
``from_unixtime``, resulting in::
SELECT extract(HOUR
fromfrom_unixtime(hour_ts)
AT TIME ZONE 'America/Los_Angeles')
from table
"""
assert sqlparse.format(
"SELECT extract(HOUR from from_unixtime(hour_ts) "
"AT TIME ZONE 'America/Los_Angeles') from table",
reindent=True,
) == (
"SELECT extract(HOUR\n from from_unixtime(hour_ts) "
"AT TIME ZONE 'America/Los_Angeles')\nfrom table"
)
def test_strip_comments_from_sql() -> None:
"""
Test that comments are stripped out correctly.
"""
assert (
strip_comments_from_sql("SELECT col1, col2 FROM table1")
== "SELECT col1, col2 FROM table1"
)
assert (
strip_comments_from_sql("SELECT col1, col2 FROM table1\n-- comment")
== "SELECT col1, col2 FROM table1\n"
)
assert (
strip_comments_from_sql("SELECT '--abc' as abc, col2 FROM table1\n")
== "SELECT '--abc' as abc, col2 FROM table1"
)
def test_sanitize_clause_valid():
# regular clauses
assert sanitize_clause("col = 1") == "col = 1"
assert sanitize_clause("1=\t\n1") == "1=\t\n1"
assert sanitize_clause("(col = 1)") == "(col = 1)"
assert sanitize_clause("(col1 = 1) AND (col2 = 2)") == "(col1 = 1) AND (col2 = 2)"
assert sanitize_clause("col = 'abc' -- comment") == "col = 'abc' -- comment\n"
# Valid literal values that at could be flagged as invalid by a naive query parser
assert (
sanitize_clause("col = 'col1 = 1) AND (col2 = 2'")
== "col = 'col1 = 1) AND (col2 = 2'"
)
assert sanitize_clause("col = 'select 1; select 2'") == "col = 'select 1; select 2'"
assert sanitize_clause("col = 'abc -- comment'") == "col = 'abc -- comment'"
def test_sanitize_clause_closing_unclosed():
with pytest.raises(QueryClauseValidationException):
sanitize_clause("col1 = 1) AND (col2 = 2)")
def test_sanitize_clause_unclosed():
with pytest.raises(QueryClauseValidationException):
sanitize_clause("(col1 = 1) AND (col2 = 2")
def test_sanitize_clause_closing_and_unclosed():
with pytest.raises(QueryClauseValidationException):
sanitize_clause("col1 = 1) AND (col2 = 2")
def test_sanitize_clause_closing_and_unclosed_nested():
with pytest.raises(QueryClauseValidationException):
sanitize_clause("(col1 = 1)) AND ((col2 = 2)")
def test_sanitize_clause_multiple():
with pytest.raises(QueryClauseValidationException):
sanitize_clause("TRUE; SELECT 1")
def test_sqlparse_issue_652():
stmt = sqlparse.parse(r"foo = '\' AND bar = 'baz'")[0]
assert len(stmt.tokens) == 5
assert str(stmt.tokens[0]) == "foo = '\\'"
@pytest.mark.parametrize(
"sql,expected",
[
("SELECT * FROM table", True),
("SELECT a FROM (SELECT 1 AS a) JOIN (SELECT * FROM table)", True),
("(SELECT COUNT(DISTINCT name) AS foo FROM birth_names)", True),
("COUNT(*)", False),
("SELECT a FROM (SELECT 1 AS a)", False),
("SELECT a FROM (SELECT 1 AS a) JOIN table", True),
("SELECT * FROM (SELECT 1 AS foo, 2 AS bar) ORDER BY foo ASC, bar", False),
("SELECT * FROM other_table", True),
("extract(HOUR from from_unixtime(hour_ts)", False),
("(SELECT * FROM table)", True),
("(SELECT COUNT(DISTINCT name) from birth_names)", True),
],
)
def test_has_table_query(sql: str, expected: bool) -> None:
"""
Test if a given statement queries a table.
This is used to prevent ad-hoc metrics from querying unauthorized tables, bypassing
row-level security.
"""
statement = sqlparse.parse(sql)[0]
assert has_table_query(statement) == expected
@pytest.mark.parametrize(
"sql,table,rls,expected",
[
# Basic test: append RLS (some_table.id=42) to an existing WHERE clause.
(
"SELECT * FROM some_table WHERE 1=1",
"some_table",
"id=42",
"SELECT * FROM some_table WHERE ( 1=1) AND some_table.id=42",
),
        # Any existing predicates MUST be wrapped in parentheses, because AND has higher
        # precedence than OR. If the RLS is `1=0` and we didn't add parentheses a user
        # could bypass it by crafting a query with `WHERE TRUE OR FALSE`, since
        # `WHERE TRUE OR FALSE AND 1=0` evaluates to `WHERE TRUE OR (FALSE AND 1=0)`.
(
"SELECT * FROM some_table WHERE TRUE OR FALSE",
"some_table",
"1=0",
"SELECT * FROM some_table WHERE ( TRUE OR FALSE) AND 1=0",
),
# Here "table" is a reserved word; since sqlparse is too aggressive when
# characterizing reserved words we need to support them even when not quoted.
(
"SELECT * FROM table WHERE 1=1",
"table",
"id=42",
"SELECT * FROM table WHERE ( 1=1) AND table.id=42",
),
# RLS is only applied to queries reading from the associated table.
(
"SELECT * FROM table WHERE 1=1",
"other_table",
"id=42",
"SELECT * FROM table WHERE 1=1",
),
(
"SELECT * FROM other_table WHERE 1=1",
"table",
"id=42",
"SELECT * FROM other_table WHERE 1=1",
),
# If there's no pre-existing WHERE clause we create one.
(
"SELECT * FROM table",
"table",
"id=42",
"SELECT * FROM table WHERE table.id=42",
),
(
"SELECT * FROM some_table",
"some_table",
"id=42",
"SELECT * FROM some_table WHERE some_table.id=42",
),
(
"SELECT * FROM table ORDER BY id",
"table",
"id=42",
"SELECT * FROM table WHERE table.id=42 ORDER BY id",
),
(
"SELECT * FROM some_table;",
"some_table",
"id=42",
"SELECT * FROM some_table WHERE some_table.id=42 ;",
),
(
"SELECT * FROM some_table ;",
"some_table",
"id=42",
"SELECT * FROM some_table WHERE some_table.id=42 ;",
),
(
"SELECT * FROM some_table ",
"some_table",
"id=42",
"SELECT * FROM some_table WHERE some_table.id=42",
),
# We add the RLS even if it's already present, to be conservative. It should have
# no impact on the query, and it's easier than testing if the RLS is already
# present (it could be present in an OR clause, eg).
(
"SELECT * FROM table WHERE 1=1 AND table.id=42",
"table",
"id=42",
"SELECT * FROM table WHERE ( 1=1 AND table.id=42) AND table.id=42",
),
(
(
"SELECT * FROM table JOIN other_table ON "
"table.id = other_table.id AND other_table.id=42"
),
"other_table",
"id=42",
(
"SELECT * FROM table JOIN other_table ON other_table.id=42 "
"AND ( table.id = other_table.id AND other_table.id=42 )"
),
),
(
"SELECT * FROM table WHERE 1=1 AND id=42",
"table",
"id=42",
"SELECT * FROM table WHERE ( 1=1 AND id=42) AND table.id=42",
),
# For joins we apply the RLS to the ON clause, since it's easier and prevents
# leaking information about number of rows on OUTER JOINs.
(
"SELECT * FROM table JOIN other_table ON table.id = other_table.id",
"other_table",
"id=42",
(
"SELECT * FROM table JOIN other_table ON other_table.id=42 "
"AND ( table.id = other_table.id )"
),
),
(
(
"SELECT * FROM table JOIN other_table ON table.id = other_table.id "
"WHERE 1=1"
),
"other_table",
"id=42",
(
"SELECT * FROM table JOIN other_table ON other_table.id=42 "
"AND ( table.id = other_table.id ) WHERE 1=1"
),
),
# Subqueries also work, as expected.
(
"SELECT * FROM (SELECT * FROM other_table)",
"other_table",
"id=42",
"SELECT * FROM (SELECT * FROM other_table WHERE other_table.id=42 )",
),
# As well as UNION.
(
"SELECT * FROM table UNION ALL SELECT * FROM other_table",
"table",
"id=42",
"SELECT * FROM table WHERE table.id=42 UNION ALL SELECT * FROM other_table",
),
(
"SELECT * FROM table UNION ALL SELECT * FROM other_table",
"other_table",
"id=42",
(
"SELECT * FROM table UNION ALL "
"SELECT * FROM other_table WHERE other_table.id=42"
),
),
# When comparing fully qualified table names (eg, schema.table) to simple names
# (eg, table) we are also conservative, assuming the schema is the same, since
# we don't have information on the default schema.
(
"SELECT * FROM schema.table_name",
"table_name",
"id=42",
"SELECT * FROM schema.table_name WHERE table_name.id=42",
),
(
"SELECT * FROM schema.table_name",
"schema.table_name",
"id=42",
"SELECT * FROM schema.table_name WHERE schema.table_name.id=42",
),
(
"SELECT * FROM table_name",
"schema.table_name",
"id=42",
"SELECT * FROM table_name WHERE schema.table_name.id=42",
),
],
)
def test_insert_rls(
mocker: MockerFixture, sql: str, table: str, rls: str, expected: str
) -> None:
"""
Insert into a statement a given RLS condition associated with a table.
"""
condition = sqlparse.parse(rls)[0]
add_table_name(condition, table)
# pylint: disable=unused-argument
def get_rls_for_table(
candidate: Token, database_id: int, default_schema: str
) -> Optional[TokenList]:
"""
Return the RLS ``condition`` if ``candidate`` matches ``table``.
"""
# compare ignoring schema
for left, right in zip(str(candidate).split(".")[::-1], table.split(".")[::-1]):
if left != right:
return None
return condition
mocker.patch("superset.sql_parse.get_rls_for_table", new=get_rls_for_table)
statement = sqlparse.parse(sql)[0]
assert (
str(
insert_rls(token_list=statement, database_id=1, default_schema="my_schema")
).strip()
== expected.strip()
)
@pytest.mark.parametrize(
"rls,table,expected",
[
("id=42", "users", "users.id=42"),
("users.id=42", "users", "users.id=42"),
("schema.users.id=42", "users", "schema.users.id=42"),
("false", "users", "false"),
],
)
def test_add_table_name(rls: str, table: str, expected: str) -> None:
condition = sqlparse.parse(rls)[0]
add_table_name(condition, table)
assert str(condition) == expected
def test_get_rls_for_table(mocker: MockerFixture, app_context: None) -> None:
"""
Tests for ``get_rls_for_table``.
"""
candidate = Identifier([Token(Name, "some_table")])
db = mocker.patch("superset.db")
dataset = db.session.query().filter().one_or_none()
dataset.__str__.return_value = "some_table"
dataset.get_sqla_row_level_filters.return_value = [text("organization_id = 1")]
assert (
str(get_rls_for_table(candidate, 1, "public"))
== "some_table.organization_id = 1"
)
dataset.get_sqla_row_level_filters.return_value = [
text("organization_id = 1"),
text("foo = 'bar'"),
]
assert (
str(get_rls_for_table(candidate, 1, "public"))
== "some_table.organization_id = 1 AND some_table.foo = 'bar'"
)
dataset.get_sqla_row_level_filters.return_value = []
assert get_rls_for_table(candidate, 1, "public") is None
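# Hedged usage sketch (added for illustration): ``add_table_name`` prefixes
# bare column references with the table name, and ``get_rls_for_table``
# AND-joins every filter a dataset returns, e.g.:
#
#     condition = sqlparse.parse("organization_id = 1")[0]
#     add_table_name(condition, "some_table")
#     str(condition)  # -> "some_table.organization_id = 1"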
| 25.222978 | 103 | 0.575398 |
4a1ffe00c929573ecdd02eb026c0124963f91ff0 | 14,978 | py | Python | intro_tab_functions.py | StineGustavsen/Fidora | 005afc4ce1c4980c772b52ab6db13172048ac622 | [
"MIT"
] | null | null | null | intro_tab_functions.py | StineGustavsen/Fidora | 005afc4ce1c4980c772b52ab6db13172048ac622 | [
"MIT"
] | null | null | null | intro_tab_functions.py | StineGustavsen/Fidora | 005afc4ce1c4980c772b52ab6db13172048ac622 | [
"MIT"
] | null | null | null | import Globals
import tkinter as tk
import tkinter.ttk
from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, \
PhotoImage, BOTH, Toplevel, GROOVE, ACTIVE, FLAT, N, S, W, E, ALL, ttk, LEFT, RIGHT, Y,\
Label, X, END, Button, StringVar, PhotoImage
#import sympy as sp
#from io import BytesIO
import cv2
import numpy as np
import os
from os.path import normpath, basename
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
#matplotlib.rcParams['text.usetex'] = True  # added to allow writing LaTeX in strings
from scipy.optimize import curve_fit, OptimizeWarning
from PIL import Image, ImageTk
import sys
from datetime import datetime
import re
import warnings
warnings.filterwarnings("error")
def readMore():
return
def createCalibrationWindow():
new_window = tk.Toplevel(Globals.tab2)
new_window.geometry("500x500") #("360x500")
new_window.grab_set()
new_window_frame = tk.Frame(new_window)
new_window_frame.config(relief=FLAT, bg='#ffffff', highlightthickness=0)
new_window_scroll_canvas = tk.Canvas(new_window_frame)
new_window_scroll_canvas.config(bg='#ffffff', height=450, width=200)
new_window_scroll_canvas.grid_propagate(0)
new_window_scroll = ttk.Scrollbar(new_window_frame, command=new_window_scroll_canvas.yview)
scrollable_frame= tk.Frame(new_window_scroll_canvas)
scrollable_frame.bind("<Configure>", lambda e: new_window_scroll_canvas.configure(scrollregion=new_window_scroll_canvas.bbox('all')))
new_window_scroll_canvas.create_window((0,0), window=scrollable_frame, anchor='nw')
new_window_scroll_canvas.configure(yscrollcommand=new_window_scroll.set)
new_window_canvas = tk.Canvas(scrollable_frame)
new_window_canvas.config(relief=FLAT, bg='#ffffff', highlightthickness=0)
new_window_canvas.pack(fill=BOTH, expand=True)
new_window_frame.pack(expand=True, fill = BOTH)
new_window_scroll_canvas.pack(side=LEFT, fill=BOTH, expand=True)
new_window_scroll.pack(side=RIGHT, fill=Y)
#make frame for text box 1
frame1 = tk.Frame(new_window_canvas, height=400, width=600)
frame1.grid(row=0, column=0, pady=(10,10), padx=(10,10))
frame1.config(bd=0, bg='#E5f9ff')
#insert text
t1 =tk.Text(frame1)
t1.grid(in_=frame1,row=0,column=0)
t1.config(bg='#E5f9ff',fg='#130E07', font=('calibri', '11'))
txt1="""
Calibration...
"""
t1.insert(END,txt1)
t1.config(state=DISABLED) #must be done after the text is inserted
#insert image 1 : orientering
scan_box_figure = Image.open("orientering.PNG")
scan_box_figure = scan_box_figure.resize((300, 200), Image.ANTIALIAS) #(width, height)
scan_figure = ImageTk.PhotoImage(scan_box_figure)
scan_figure_label = Label(new_window_canvas, image=scan_figure)
scan_figure_label.image = scan_figure
scan_figure_label.grid(row=0,column=1, columnspan=2,sticky=N+S+W+E, pady=(0,10))
scan_figure_label.config(bg='#FFF')#,height=10, width=10)
#####################################################################################33
def createRaystationWindow():
new_window = tk.Toplevel(Globals.tab2)
new_window.geometry("500x500") #("360x500")
new_window.grab_set()
new_window_frame = tk.Frame(new_window)
new_window_frame.config(relief=FLAT, bg='#ffffff', highlightthickness=0)
new_window_scroll_canvas = tk.Canvas(new_window_frame)
new_window_scroll_canvas.config(bg='#ffffff', height=450, width=200)
new_window_scroll_canvas.grid_propagate(0)
new_window_scroll = ttk.Scrollbar(new_window_frame, command=new_window_scroll_canvas.yview)
scrollable_frame= tk.Frame(new_window_scroll_canvas)
scrollable_frame.bind("<Configure>", lambda e: new_window_scroll_canvas.configure(scrollregion=new_window_scroll_canvas.bbox('all')))
new_window_scroll_canvas.create_window((0,0), window=scrollable_frame, anchor='nw')
new_window_scroll_canvas.configure(yscrollcommand=new_window_scroll.set)
new_window_canvas = tk.Canvas(scrollable_frame)
new_window_canvas.config(relief=FLAT, bg='#ffffff', highlightthickness=0)
new_window_canvas.pack(fill=BOTH, expand=True)
new_window_frame.pack(expand=True, fill = BOTH)
new_window_scroll_canvas.pack(side=LEFT, fill=BOTH, expand=True)
new_window_scroll.pack(side=RIGHT, fill=Y)
#make frame for text box 1
frame1 = tk.Frame(new_window_canvas, height=400, width=600)
frame1.grid(row=0, column=0, pady=(10,10), padx=(10,10))
frame1.config(bd=0, bg='#E5f9ff')
#insert text
t1 =tk.Text(frame1)
t1.grid(in_=frame1,row=0,column=0)
t1.config(bg='#E5f9ff',fg='#130E07', font=('calibri', '11'))
txt1="""
Raystation...
"""
t1.insert(END,txt1)
t1.config(state=DISABLED) #must be done after the text is inserted
#insert image 1 : orientering
scan_box_figure = Image.open("orientering.PNG")
scan_box_figure = scan_box_figure.resize((300, 200), Image.ANTIALIAS) #(width, height)
scan_figure = ImageTk.PhotoImage(scan_box_figure)
scan_figure_label = Label(new_window_canvas, image=scan_figure)
scan_figure_label.image = scan_figure
scan_figure_label.grid(row=0,column=1, columnspan=2,sticky=N+S+W+E, pady=(0,10))
scan_figure_label.config(bg='#FFF')#,height=10, width=10)
#####################################################################################
def createScannerSettingsWindow():
new_window = tk.Toplevel(Globals.tab2)
new_window.geometry("500x500") #("360x500")
new_window.grab_set()
new_window_frame = tk.Frame(new_window)
new_window_frame.config(relief=FLAT, bg='#ffffff', highlightthickness=0)
new_window_scroll_canvas = tk.Canvas(new_window_frame)
new_window_scroll_canvas.config(bg='#ffffff', height=450, width=200)
new_window_scroll_canvas.grid_propagate(0)
new_window_scroll = ttk.Scrollbar(new_window_frame, command=new_window_scroll_canvas.yview)
scrollable_frame= tk.Frame(new_window_scroll_canvas)
scrollable_frame.bind("<Configure>", lambda e: new_window_scroll_canvas.configure(scrollregion=new_window_scroll_canvas.bbox('all')))
new_window_scroll_canvas.create_window((0,0), window=scrollable_frame, anchor='nw')
new_window_scroll_canvas.configure(yscrollcommand=new_window_scroll.set)
new_window_canvas = tk.Canvas(scrollable_frame)
new_window_canvas.config(relief=FLAT, bg='#ffffff', highlightthickness=0)
new_window_canvas.pack(fill=BOTH, expand=True)
new_window_frame.pack(expand=True, fill = BOTH)
new_window_scroll_canvas.pack(side=LEFT, fill=BOTH, expand=True)
new_window_scroll.pack(side=RIGHT, fill=Y)
#make frame for text box 1
frame1 = tk.Frame(new_window_canvas, height=400, width=600)
frame1.grid(row=0, column=0, pady=(10,10), padx=(10,10))
frame1.config(bd=0, bg='#E5f9ff')
#insert text
t1 =tk.Text(frame1)
t1.grid(in_=frame1,row=0,column=0)
t1.config(bg='#E5f9ff',fg='#130E07', font=('calibri', '11'))
txt1="""
In order to use FIDORA and get reliable results, one should make sure that all these
steps and settings are followed.
Step 1: Before irradiation \n
Step 2: Working with the scanner \n
Step 3: Scanner settings \n
Step 4: Scanning \n
Step 1: Before irradiation
Before irradiation of the GafChromic EBT3 film, remember to indicate which direction
is landscape direction on the film. Each film or film fragment must be marked with an
orientation. Film should always keep the same orientation (portrait or landscape) on
the scanner, and in this program you must use landscape orientation. Use the marks
to place films consistently on the scanner.
"""
t1.insert(END,txt1)
t1.config(state=DISABLED) #must be done after the text is inserted
#insert image 1 : orientering
scan_box_figure = Image.open("orientering.PNG")
scan_box_figure = scan_box_figure.resize((300, 200), Image.ANTIALIAS) #(width, height)
scan_figure = ImageTk.PhotoImage(scan_box_figure)
scan_figure_label = Label(new_window_canvas, image=scan_figure)
scan_figure_label.image = scan_figure
scan_figure_label.grid(row=0,column=1, columnspan=2,sticky=N+S+W+E, pady=(0,10))
scan_figure_label.config(bg='#FFF')#,height=10, width=10)
#####################################################################################33
#make frame2
frame2 = tk.Frame(new_window_canvas, height=400, width=600)
frame2.grid(row=1, column=0, pady=(10,10), padx=(10,10))
frame2.config(bd=0, bg='#E5f9ff')
#insert text 2
t2 =tk.Text(frame2)
t2.grid(in_=frame2,row=0,column=0)
t2.config(bg='#E5f9ff',font=('calibri', '11'))
txt2="""
Step 2: Working with the scanner
After irradiating the GafChromic EBT3 film, it should be scanned at least 12
hours later, with an Epson V750 Pro flat-bed scanner. Center the film on the
scanner with a frame. The frame should preferably be of GafChromic film as well,
    to achieve equal light conditions. In addition to the frame, it is important to
use a transparent compression (glass) sheet on top of the film to avoid film
curling. This is important to achieve equal optical densities throughout the
scanner surface. Also, remember to position the glass sheet so that it covers
the entire calibration area (the innermost area of the scanner surface).
Otherwise one can experience artifacts such as banding. If one wants to change
the glass sheet being used, it must be verified before use. That is, it must be
checked that the glass sheet itself does not introduce great errors in the
scanner readout.
The scanner surface and the glass sheet must be cleaned before scanning, and
this can be done using regular lens wipes. This is to prevent dust or other
contamination to introduce errors in the scanned image.
The Epson V750 Pro software should be installed, and can be found here:
https://www.epson.no/products/scanners/consumer-scanners/epson-perfection-
v750-pro/Stotte-og-nedlastinger
"""
t2.insert(END,txt2)
t2.config(state=DISABLED) #must be done after the text is inserted
#insert image 2a : epson_v750_pro
scan_box_figure = Image.open("epson_v750_pro.PNG")
scan_box_figure = scan_box_figure.resize((300, 300), Image.ANTIALIAS) #(width, height)
scan_figure = ImageTk.PhotoImage(scan_box_figure)
scan_figure_label = Label(new_window_canvas, image=scan_figure)
scan_figure_label.image = scan_figure
scan_figure_label.grid(row=1,column=1, columnspan=1,sticky=N+S+W+E, pady=(0,10))
scan_figure_label.config(bg='#FFF')#,height=10, width=10)
#insert image 2b : scanner_img
scan_box_figure = Image.open("scanner_img.PNG")
scan_box_figure = scan_box_figure.resize((300, 300), Image.ANTIALIAS) #(width, height)
scan_figure = ImageTk.PhotoImage(scan_box_figure)
scan_figure_label = Label(new_window_canvas, image=scan_figure)
scan_figure_label.image = scan_figure
scan_figure_label.grid(row=1,column=2, columnspan=1,sticky=N+S+W+E, pady=(0,10))
scan_figure_label.config(bg='#FFF')#,height=10, width=10)
#make frame 3
frame3 = tk.Frame(new_window_canvas, height=400, width=600)
frame3.grid(row=2, column=0, pady=(10,10), padx=(10,10))
frame3.config(bd=0, bg='#E5f9ff')
#insert text 3
t3 =tk.Text(frame3)
t3.grid(in_=frame3,row=0,column=0)
t3.config(bg='#E5f9ff',font=('calibri', '11'))
#text 3
txt3="""
Step 3: Scanner settings
After installation, make sure the scanner software settings are correct:
1. Mode: Professional mode
2. Image type: 48-bit Color
3. Resolution: 127 dpi
4. Document size: W: 119.4 mm, H: 118.1 mm
5. Target size: Original
6. Adjustments: No adjustments should be made, as this might interfere with the
correction made in this program.
Make sure your settings are in accordance with the image to the right.
"""
t3.insert(END,txt3)
t3.config(state=DISABLED) #must be done after the text is inserted
#insert image 3
scan_box_figure = Image.open("epsonScan.PNG")
scan_box_figure = scan_box_figure.resize((240, 400), Image.ANTIALIAS) #(width, height)
scan_figure = ImageTk.PhotoImage(scan_box_figure)
scan_figure_label = Label(new_window_canvas, image=scan_figure)
scan_figure_label.image = scan_figure
scan_figure_label.grid(row=2,column=1, columnspan=2,sticky=N+S+W+E, pady=(0,10))
scan_figure_label.config(bg='#FFF')#,height=10, width=10)
##############################################################################
#make frame 4
frame4 = tk.Frame(new_window_canvas, height=400, width=600)
frame4.grid(row=3, column=0, pady=(10,10), padx=(10,10))
frame4.config(bd=0, bg='#E5f9ff')
#insert text 4
t4 =tk.Text(frame4)
t4.grid(in_=frame4,row=0,column=0)
t4.config(bg='#E5f9ff',font=('calibri', '11'))
txt4="""
Step 4: Scanning
When all settings are correct, you are ready to scan. First perform 3-5 warm-up
scans, in order to stabilize the light source. This can be done by pressing “Scan”
about 4 times, and storing the image files in an appropriate folder. Make sure to
store the file as Multi-TIFF (*.tif). When scanning, try to make another scan every
    minute. At this rate, the scanner light source will stay stable, neither warming
    up nor cooling down, and thus create similar lighting conditions at each scan.
After performing 3-5 warm-up scans you are ready to scan the actual scans that can
be used in further analysis. Press “Scan”, and again make sure you store the file
    as a Multi-TIFF file. Remember to always scan approximately every minute. If many
minutes passes without scanning, one should perform an additional round of 3-5
warm-up scans to stabilize the light source once again.
"""
t4.insert(END,txt4)
t4.config(state=DISABLED)
#insert image 4
scan_box_figure = Image.open("fileSaveSetting.PNG")
scan_box_figure = scan_box_figure.resize((400, 400), Image.ANTIALIAS) #(width, height)
scan_figure = ImageTk.PhotoImage(scan_box_figure)
scan_figure_label = Label(new_window_canvas, image=scan_figure)
scan_figure_label.image = scan_figure
scan_figure_label.grid(row=3,column=1, columnspan=2,sticky=N+S+W+E, pady=(0,10))
scan_figure_label.config(bg='#FFF')#,height=10, width=10) | 46.086154 | 138 | 0.697022 |
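# --- Added sketch (not in the original module) --------------------------------
# The three create*Window functions above repeat the same scrollable-canvas
# boilerplate.  One possible refactoring, assuming the same Globals/tkinter
# setup used throughout this file (all names below are illustrative):
def _make_scrollable_window(width=500, height=500):
    """Create a Toplevel with a scrollable white canvas and return the inner
    canvas on which content frames can be gridded (illustrative helper)."""
    new_window = tk.Toplevel(Globals.tab2)
    new_window.geometry("%dx%d" % (width, height))
    new_window.grab_set()
    frame = tk.Frame(new_window)
    frame.config(relief=FLAT, bg='#ffffff', highlightthickness=0)
    scroll_canvas = tk.Canvas(frame, bg='#ffffff', height=height - 50, width=200)
    scroll_canvas.grid_propagate(0)
    scrollbar = ttk.Scrollbar(frame, command=scroll_canvas.yview)
    inner = tk.Frame(scroll_canvas)
    inner.bind("<Configure>",
               lambda e: scroll_canvas.configure(scrollregion=scroll_canvas.bbox('all')))
    scroll_canvas.create_window((0, 0), window=inner, anchor='nw')
    scroll_canvas.configure(yscrollcommand=scrollbar.set)
    content = tk.Canvas(inner, relief=FLAT, bg='#ffffff', highlightthickness=0)
    content.pack(fill=BOTH, expand=True)
    frame.pack(expand=True, fill=BOTH)
    scroll_canvas.pack(side=LEFT, fill=BOTH, expand=True)
    scrollbar.pack(side=RIGHT, fill=Y)
    return content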
4a1ffedd020c5c84c407c83387b5e7c141fc7dde | 602 | py | Python | original_files/boot_original.py | lemariva/uPyMaixYoloV2 | 0140c13cbffef780cd06fd4206926a9d46ee8da8 | [
"Apache-2.0"
] | 5 | 2020-01-17T09:03:52.000Z | 2022-03-21T10:34:23.000Z | original_files/boot_original.py | lemariva/uPyMaixYoloV2 | 0140c13cbffef780cd06fd4206926a9d46ee8da8 | [
"Apache-2.0"
] | null | null | null | original_files/boot_original.py | lemariva/uPyMaixYoloV2 | 0140c13cbffef780cd06fd4206926a9d46ee8da8 | [
"Apache-2.0"
] | 3 | 2020-07-07T14:41:41.000Z | 2022-03-21T10:34:27.000Z |
from fpioa_manager import *
import os, Maix, lcd, image
from Maix import FPIOA, GPIO
# route board pin 16 to GPIO7 and read it as an input (boot-mode check pin)
test_pin = 16
fpioa = FPIOA()
fpioa.set_function(test_pin, FPIOA.GPIO7)
test_gpio = GPIO(GPIO.GPIO7, GPIO.IN)
lcd.init(color=(255,0,0))
lcd.draw_string(100,120, "Welcome to MaixPy", lcd.WHITE, lcd.RED)
if test_gpio.value() == 0:
print('PIN 16 pulled down, enter test mode')
import sensor
import image
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.run(1)
lcd.freq(16000000)
while True:
img=sensor.snapshot()
lcd.display(img) | 26.173913 | 65 | 0.702658 |
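# Hedged note (added): when pin 16 is left floating/high, the board skips the
# camera test loop above and only shows the welcome banner; as with standard
# MicroPython boot behavior, a user script such as main.py would normally run
# after this boot file (an assumption, not stated in this file).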
4a2001c5491746735d896b80b3536eecb2a7fae8 | 26,205 | py | Python | ipsframework/taskManager.py | rosswhitfield/IPS-framework | c10e1e8a75157a79324ea6a061a3e89fb8f33cb6 | [
"BSD-3-Clause"
] | null | null | null | ipsframework/taskManager.py | rosswhitfield/IPS-framework | c10e1e8a75157a79324ea6a061a3e89fb8f33cb6 | [
"BSD-3-Clause"
] | null | null | null | ipsframework/taskManager.py | rosswhitfield/IPS-framework | c10e1e8a75157a79324ea6a061a3e89fb8f33cb6 | [
"BSD-3-Clause"
] | null | null | null | # -------------------------------------------------------------------------------
# Copyright 2006-2021 UT-Battelle, LLC. See LICENSE for more information.
# -------------------------------------------------------------------------------
import os
from math import ceil
from . import messages, configurationManager
from .ipsExceptions import BlockedMessageException, \
IncompleteCallException, \
InsufficientResourcesException, \
BadResourceRequestException, \
ResourceRequestMismatchException
from .ipsutil import which
class TaskManager:
"""
The task manager is responsible for facilitating component method
invocations, and the launching of tasks.
"""
# TM __init__
def __init__(self, fwk):
# ref to framework
self.fwk = fwk
self.event_mgr = None
self.data_mgr = None
self.resource_mgr = None
self.config_mgr = None
self.host = None
self.comp_registry = configurationManager.ComponentRegistry()
self.service_methods = ['init_call',
'launch_task',
# 'launchTask', --- deprecated
'wait_call',
'init_task',
'init_task_pool',
'finish_task']
        # **** this is where service methods are registered
self.fwk.register_service_handler(self.service_methods,
getattr(self, 'process_service_request'))
self.task_map = {}
self.task_launch_cmd = ''
# table of currently running tasks
self.curr_task_table = {}
        # counters for generating unique call and task ids
self.next_call_id = 1
self.next_task_id = 1
self.outstanding_calls = {}
self.finished_calls = {}
        self.mpicmd = None  # Used only for CCM on Edison
# this is where messages are received and then something smart happens
def process_service_request(self, msg):
"""
        Invokes the appropriate public task manager method for the component
specified in *msg*. Return method's return value.
"""
self.fwk.debug('Task Manager received message: %s', str(msg.__dict__))
method = getattr(self, msg.target_method)
retval = method(msg)
return retval
# TM initialize
def initialize(self, data_mgr, resource_mgr, config_mgr):
"""
Initialize references to other managers and key values from
configuration manager.
"""
self.event_mgr = None # eventManager(self)
self.data_mgr = data_mgr
self.resource_mgr = resource_mgr
self.config_mgr = config_mgr
self.host = self.config_mgr.get_platform_parameter('HOST')
try:
self.task_launch_cmd = self.config_mgr.get_platform_parameter('MPIRUN')
except Exception:
print('Error accessing platform parameter MPIRUN')
raise
# do later - subscribe to events, set up event publishing structure
# publish "TM initialized" event
def get_call_id(self):
"""
Return a new call id
"""
retval = self.next_call_id
self.next_call_id = self.next_call_id + 1
return retval
def get_task_id(self):
"""
Return a new task id
"""
retval = self.next_task_id
self.next_task_id = self.next_task_id + 1
return retval
def printCurrTaskTable(self):
"""
Prints the task table pretty-like.
"""
ctt = self.curr_task_table
for c, i in ctt.items():
print(c)
for k, v in i.items():
print(" ", k, "=", v)
print("------")
print("=====================")
# TM call
def init_call(self, init_call_msg, manage_return=True):
r"""
Creates and sends a :py:obj:`messages.MethodInvokeMessage` from
the calling component
to the target component. If *manage_return* is ``True``, a record is
added to *outstanding_calls*. Return call id.
Message args:
0. method_name
1. \+ arguments to be passed on as method arguments.
"""
callee_id = init_call_msg.target_comp_id
method_name = init_call_msg.args[0]
args = init_call_msg.args[1:]
keywords = init_call_msg.keywords
caller_id = init_call_msg.sender_id
call_id = self.get_call_id()
self.fwk.debug('TM:init_call(): %s %s %s %s',
caller_id, callee_id, method_name, str(args))
invoke_msg = messages.MethodInvokeMessage(self.fwk.component_id,
callee_id,
call_id,
method_name, *args, **keywords)
invocation_q = self.comp_registry.getComponentArtifact(callee_id,
'invocation_q')
invocation_q.put(invoke_msg)
if manage_return:
self.outstanding_calls[call_id] = (caller_id, None)
return call_id
def return_call(self, response_msg):
"""
Handle the response message generated by a component in response
to a method invocation on that component.
        *response_msg* is expected to be of type :py:obj:`messages.MethodResultMessage`
"""
call_id = response_msg.call_id
caller_id = self.outstanding_calls[call_id][0]
self.fwk.debug('TM:call_return() call_id = %s caller_id = %s', call_id, caller_id)
self.finished_calls[call_id] = (caller_id, response_msg)
del self.outstanding_calls[call_id]
def wait_call(self, wait_msg):
"""
Determine if the call has finished. If finished, return any data or
errors. If not finished raise the appropriate blocking or nonblocking
exception and try again later.
*wait_msg* is expected to be of type :py:obj:`messages.ServiceRequestMessage`
Message args:
0. *call_id*: call id for which to wait
1. *blocking*: determines the wait is blocking or not
"""
call_id = wait_msg.args[0]
blocking = wait_msg.args[1]
self.fwk.debug('TM:wait_call() call_id = %s', call_id)
if call_id in self.finished_calls:
response_msg = self.finished_calls[call_id][1]
del self.finished_calls[call_id]
if response_msg.status == messages.Message.FAILURE:
raise Exception(response_msg.args[0])
else:
return response_msg.args
if not blocking:
raise IncompleteCallException(call_id)
else:
raise BlockedMessageException(wait_msg, '***call %s not finished' % call_id)
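    # Hedged usage sketch (added for illustration; the component-side service
    # names below are an assumption, not taken from this file):
    #
    #     call_id = services.call_nonblocking(comp, 'step', t)
    #     ...
    #     retval = services.wait_call(call_id, block=True)
    #
    # With block=False the caller would instead catch IncompleteCallException
    # and poll again later.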
def init_task(self, init_task_msg):
r"""
Allocate resources needed for a new task and build the task
launch command using the binary and arguments provided by
the requesting component. Return launch command to component via
:py:obj:`messages.ServiceResponseMessage`. Raise exception if task
can not be launched at this time (:py:exc:`ipsExceptions.BadResourceRequestException`, :py:exc:`ipsExceptions.InsufficientResourcesException`).
*init_task_msg* is expected to be of type :py:obj:`messages.ServiceRequestMessage`
Message args:
0. *nproc*: number of processes the task needs
1. *binary*: full path to the executable to launch
# SIMYAN: added this to deal with the component directory change
2. *working_dir*: full path to directory where the task will be launched
3. *tppn*: processes per node for this task. (0 indicates that the default ppn is used.)
4. *block*: whether or not to wait until the task can be launched.
5. *wnodes*: ``True`` for whole node allocation, ``False`` otherwise.
6. *wsocks*: ``True`` for whole socket allocation, ``False`` otherwise.
7. \+ *cmd_args*: any arguments for the executable
"""
caller_id = init_task_msg.sender_id
nproc = int(init_task_msg.args[0])
binary = init_task_msg.args[1]
# SIMYAN: working_dir stored
working_dir = init_task_msg.args[2]
tppn = int(init_task_msg.args[3]) # task processes per node
block = init_task_msg.args[4] # Block waiting for available resources
wnodes = init_task_msg.args[5]
wsocks = init_task_msg.args[6]
tcpp = init_task_msg.args[7]
# SIMYAN: increased arguments
cmd_args = init_task_msg.args[8:]
try:
return self._init_task(caller_id, nproc, binary, working_dir, tppn, tcpp, wnodes, wsocks, cmd_args)
except InsufficientResourcesException:
if block:
raise BlockedMessageException(init_task_msg, '***%s waiting for %d resources' %
(caller_id, nproc))
else:
raise
except BadResourceRequestException as e:
self.fwk.error("There has been a fatal error, %s requested %d too many processors in task %d",
caller_id, e.deficit, e.task_id)
raise
except ResourceRequestMismatchException as e:
self.fwk.error("There has been a fatal error, %s requested too few processors per node to launch task %d (requested: procs = %d, ppn = %d)",
caller_id, e.task_id, e.nproc, e.ppn)
raise
except Exception:
raise
def _init_task(self, caller_id, nproc, binary, working_dir, tppn, tcpp, wnodes, wsocks, cmd_args):
# handle for task related things
task_id = self.get_task_id()
retval = self.resource_mgr.get_allocation(caller_id,
nproc,
task_id,
wnodes,
wsocks,
task_ppn=tppn,
task_cpp=tcpp)
self.fwk.debug('RM: get_allocation() returned %s', str(retval))
partial_node = retval[0]
if partial_node:
(nodelist, corelist, ppn, max_ppn, accurateNodes) = retval[1:]
else:
(nodelist, ppn, max_ppn, cpp, accurateNodes) = retval[1:]
if partial_node:
nodes = ','.join(nodelist)
(cmd, env_update) = self.build_launch_cmd(nproc, binary, cmd_args,
working_dir, ppn,
max_ppn, nodes,
accurateNodes,
partial_node, task_id,
core_list=corelist)
else:
if accurateNodes:
nodes = ','.join(nodelist)
else:
nodes = ''
(cmd, env_update) = self.build_launch_cmd(nproc, binary, cmd_args,
working_dir, ppn,
max_ppn, nodes,
accurateNodes,
False, task_id,
cpp)
self.curr_task_table[task_id] = {'component': caller_id,
'status': 'init_task',
'binary': binary,
'nproc': nproc,
'args': cmd_args,
'launch_cmd': cmd,
'env_update': env_update}
return (task_id, cmd, env_update)
def build_launch_cmd(self, nproc, binary, cmd_args, working_dir, ppn,
max_ppn, nodes, accurateNodes, partial_nodes,
task_id, cpp=0, core_list=''):
"""
Construct task launch command to be executed by the component.
* nproc - number of processes to use
* binary - binary to launch
* cmd_args - additional command line arguments for the binary
* working_dir - full path to where the executable will be launched
* ppn - processes per node value to use
* max_ppn - maximum possible ppn for this allocation
* nodes - comma separated list of node ids
* accurateNodes - if ``True``, launch on nodes in *nodes*, otherwise the parallel launcher determines the process placement
* partial_nodes - if ``True`` and *accurateNodes* and *task_launch_cmd* == 'mpirun',
a host file is created specifying the exact placement of processes on cores.
* core_list - used for creating host file with process to core mappings
"""
# set up launch command
env_update = None
nproc_flag = ''
smp_node = len(self.resource_mgr.nodes) == 1
if self.task_launch_cmd == 'eval':
# cmd = binary
if len(cmd_args) > 0:
cmd_args = ' '.join(cmd_args)
cmd = ' '.join([binary, cmd_args])
else:
cmd = binary
return cmd, env_update
# -------------------------------------
# mpirun
# -------------------------------------
elif self.task_launch_cmd == 'mpirun':
version = self.config_mgr.get_platform_parameter('MPIRUN_VERSION').upper()
if version.startswith("OPENMPI"):
if version == 'OPENMPI-DVM':
mpi_binary = 'prun'
smp_node = False
else: # VERSION = OPENMPI_GENERIC
mpi_binary = 'mpirun'
# Find and cache full path to launch executable
if not self.mpicmd:
self.mpicmd = which(mpi_binary)
mpicmd = self.mpicmd
if not mpicmd:
raise Exception('Missing %s command in $PATH' % (mpi_binary))
nproc_flag = '-np'
ppn_flag = '-npernode'
host_select = '-H'
if smp_node or mpi_binary == 'prun':
cmd = ' '.join([mpicmd,
nproc_flag, str(nproc)])
else:
cmd = ' '.join([mpicmd,
nproc_flag, str(nproc),
ppn_flag, str(ppn)])
cmd = f"{cmd} -x PYTHONPATH" # Propagate PYTHONPATH to compute nodes
if accurateNodes:
cmd = ' '.join([cmd, host_select, nodes])
elif version == 'SGI':
if accurateNodes:
core_dict = {}
ppn_groups = {}
num_cores = self.resource_mgr.cores_per_socket
for (n, cl) in core_list:
core_dict.update({n: cl})
if len(cl) in ppn_groups:
ppn_groups[len(cl)].append(n)
else:
ppn_groups.update({len(cl): [n]})
cmdlets = []
envlets = []
bin_n_args = ' '.join([binary, *cmd_args])
for p, ns in ppn_groups.items():
cmdlets.append(' '.join([','.join(ns), str(p),
bin_n_args]))
el_node = []
for n in ns:
el_tmp = []
for k in core_dict[n]:
s, c = k.split(':')
s = int(s)
c = int(c)
el_tmp.append(str(s * num_cores + c))
el_node.append(','.join(el_tmp))
envlets.append(':'.join(el_node))
cmd = self.task_launch_cmd + ' ' + ' : '.join(cmdlets)
env_update = {'MPI_DSM_CPULIST': ':'.join(envlets)}
return cmd, env_update
else:
cmd = ' '.join([self.task_launch_cmd, str(ppn), binary,
' '.join(cmd_args)])
# --------------------------------------
# mpiexec (MPICH variants)
# --------------------------------------
elif self.task_launch_cmd == 'mpiexec':
nproc_flag = '-n'
ppn_flag = '-npernode'
if smp_node:
cmd = ' '.join([self.task_launch_cmd, nproc_flag, str(nproc)])
elif self.host == 'iter':
cfg_fname = ".node_config_" + str(task_id)
cfg_fname = os.path.join(working_dir, cfg_fname)
cfg_file = open(cfg_fname, 'w')
cmd_args = ' '.join(cmd_args)
node_command = ' '.join([binary, cmd_args])
node_spec = ''
if partial_nodes:
for (node, cores) in core_list:
node_spec += ('%s ' % (node)) * len(cores)
else:
for node in nodes.split(' ,'):
node_spec += ('%s ' % (node)) * ppn
print('%s: %s' % (node_spec, node_command), file=cfg_file)
config_option = '-config=' + cfg_fname
cmd = ' '.join([self.task_launch_cmd, config_option])
self.curr_task_table[task_id]['node_file'] = cfg_fname
return cmd, env_update
elif accurateNodes: # Need to assign tasks to nodes explicitly
host_select = '--host ' + nodes
cmd = ' '.join([self.task_launch_cmd, host_select,
nproc_flag, str(nproc), ppn_flag,
str(ppn)])
else:
cmd = ' '.join([self.task_launch_cmd, nproc_flag,
str(nproc), ppn_flag, str(ppn)])
# ------------------------------------
# aprun (Cray parallel launch)
# ------------------------------------
elif self.task_launch_cmd == 'aprun':
nproc_flag = '-n'
ppn_flag = '-N'
cpu_assign_flag = '-cc'
by_numanode_flag = '-S'
if self.host in ['hopper', 'edison']:
num_numanodes = self.resource_mgr.sockets_per_node
num_cores = self.resource_mgr.cores_per_node
if accurateNodes:
nlist_flag = '-L'
num_nodes = len(nodes.split(','))
ppn = int(ceil(float(nproc) / num_nodes))
per_numa = int(ceil(float(ppn) / num_numanodes))
if per_numa == num_cores / num_numanodes:
cmd = ' '.join([self.task_launch_cmd,
nproc_flag, str(nproc),
ppn_flag, str(ppn),
nlist_flag, nodes])
else:
if num_nodes > 1:
ppn = per_numa * num_numanodes
if nproc < ppn:
ppn = nproc
cmd = ' '.join([self.task_launch_cmd,
nproc_flag, str(nproc),
ppn_flag, str(ppn),
by_numanode_flag, str(per_numa),
nlist_flag, nodes])
else:
num_nodes = int(ceil(float(nproc) / ppn))
ppn = int(ceil(float(nproc) / num_nodes))
per_numa = int(ceil(float(ppn) / num_numanodes))
if per_numa == self.resource_mgr.cores_per_node / self.resource_mgr.sockets_per_node:
cmd = ' '.join([self.task_launch_cmd,
nproc_flag, str(nproc),
ppn_flag, str(ppn)])
else:
if num_nodes > 1:
ppn = per_numa * num_numanodes
if nproc < ppn:
ppn = nproc
cmd = ' '.join([self.task_launch_cmd,
nproc_flag, str(nproc),
ppn_flag, str(ppn),
by_numanode_flag, str(per_numa)])
else:
if accurateNodes:
nlist_flag = '-L'
cmd = ' '.join([self.task_launch_cmd,
nproc_flag, str(nproc),
ppn_flag, str(ppn),
nlist_flag, nodes])
else:
cmd = ' '.join([self.task_launch_cmd,
nproc_flag, str(nproc),
cpu_assign_flag,
'%d-%d' % (max_ppn - 1, max_ppn - int(ppn)),
ppn_flag, str(ppn)])
# ------------------------------------
# numactl (single process launcher)
# ------------------------------------
elif self.task_launch_cmd == 'numactl':
if accurateNodes and partial_nodes:
proc_flag = '--physcpubind='
procs = ''
for p in core_list:
procs = ','.join([k.split(':')[1] for k in p[1]])
proc_flag += procs
else:
self.fwk.warning('numactl needs accurateNodes')
proc_flag = ''
cmd = ' '.join([self.task_launch_cmd,
proc_flag])
elif self.task_launch_cmd == 'srun':
nproc_flag = '-n'
nnodes_flag = '-N'
num_nodes = len(nodes.split(','))
if partial_nodes:
cmd = ' '.join([self.task_launch_cmd,
nnodes_flag, str(num_nodes),
nproc_flag, str(nproc)])
else:
cpuptask_flag = '-c'
cpubind_flag = '--threads-per-core=1 --cpu-bind=cores'
cmd = ' '.join([self.task_launch_cmd,
nnodes_flag, str(num_nodes),
nproc_flag, str(nproc),
cpuptask_flag, str(cpp),
cpubind_flag])
env_update = {'OMP_PLACES': 'threads',
'OMP_PROC_BIND': 'spread',
'OMP_NUM_THREADS': str(cpp)}
else:
self.fwk.error("invalid task launch command.")
raise RuntimeError("invalid task launch command.")
cmd_args = ' '.join(cmd_args)
cmd = ' '.join([cmd, binary, cmd_args])
return cmd, env_update
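    # Hedged examples of what build_launch_cmd can return (illustrative only;
    # the exact string depends on the platform configuration):
    #
    #   task_launch_cmd == 'mpirun' (OpenMPI):
    #       "mpirun -np 8 -npernode 4 -x PYTHONPATH -H n0,n1 ./a.out arg1"
    #   task_launch_cmd == 'srun' (whole-node allocation):
    #       "srun -N 2 -n 8 -c 4 --threads-per-core=1 --cpu-bind=cores ./a.out arg1"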
def init_task_pool(self, init_task_msg):
"""
Allocate resources needed for a new task and build the task
launch command using the binary and arguments provided by
the requesting component.
*init_task_msg* is expected to be of type :py:obj:`messages.ServiceRequestMessage`
Message args:
0. *task_dict*: dictionary of task names and objects
"""
caller_id = init_task_msg.sender_id
task_dict = init_task_msg.args[0]
ret_dict = {}
for task_name in task_dict:
# handle for task related things
(nproc, working_dir, binary, cmd_args, tppn, wnodes, wsocks, tcpp) = task_dict[task_name]
try:
ret_dict[task_name] = self._init_task(caller_id, nproc, binary, working_dir, tppn, tcpp, wnodes, wsocks, cmd_args)
except InsufficientResourcesException:
continue
except BadResourceRequestException as e:
self.fwk.error("There has been a fatal error, %s requested %d too many processors in task %d",
caller_id, e.deficit, e.task_id)
for task_id, _, _ in ret_dict.values():
self.resource_mgr.release_allocation(task_id, -1)
del self.curr_task_table[task_id]
raise
except ResourceRequestMismatchException as e:
self.fwk.error("There has been a fatal error, %s requested too few processors per node to launch task %d (request: procs = %d, ppn = %d)",
caller_id, e.task_id, e.nproc, e.ppn)
for task_id, _, _ in ret_dict.values():
self.resource_mgr.release_allocation(task_id, -1)
del self.curr_task_table[task_id]
raise
except Exception:
self.fwk.exception('TM:init_task_pool(): Allocation exception')
raise
return ret_dict
def finish_task(self, finish_task_msg):
"""
Cleanup after a task launched by a component terminates
*finish_task_msg* is expected to be of type :py:obj:`messages.ServiceRequestMessage`
Message args:
0. *task_id*: task id of finished task
1. *task_data*: return code of task
"""
task_id = finish_task_msg.args[0]
task_data = finish_task_msg.args[1]
try:
self.resource_mgr.release_allocation(task_id, task_data)
del self.curr_task_table[task_id]
except Exception:
print('Error finishing task ', task_id)
raise
return 0
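    # Hedged lifecycle sketch (added for illustration): a task normally flows
    # through this manager as
    #   1. init_task    -> allocation reserved, launch command built
    #   2. (the component executes the returned command, e.g. via subprocess)
    #   3. finish_task  -> allocation released, entry dropped from
    #      curr_task_table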
| 43.82107 | 154 | 0.493074 |
4a2001d32c01a9e5a23ee7d8658449ed7028a8c5 | 6,856 | py | Python | data/aligned_dataset.py | MTKSHU/cloth-segmentation | 241c6dfd51d89aedb02b67f48d296efe09698ddd | [
"MIT"
] | null | null | null | data/aligned_dataset.py | MTKSHU/cloth-segmentation | 241c6dfd51d89aedb02b67f48d296efe09698ddd | [
"MIT"
] | null | null | null | data/aligned_dataset.py | MTKSHU/cloth-segmentation | 241c6dfd51d89aedb02b67f48d296efe09698ddd | [
"MIT"
] | null | null | null | from data.base_dataset import BaseDataset, Rescale_fixed, Normalize_image
from data.image_folder import make_dataset, make_dataset_test
import os
import cv2
import json
import itertools
import collections
from tqdm import tqdm
import pandas as pd
import numpy as np
from PIL import Image
import torch
import torchvision.transforms as transforms
class AlignedDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.image_dir = opt.image_folder
self.df_path = opt.df_path
self.width = opt.fine_width
self.height = opt.fine_height
# for rgb imgs
transforms_list = []
transforms_list += [transforms.ToTensor()]
transforms_list += [Normalize_image(opt.mean, opt.std)]
self.transform_rgb = transforms.Compose(transforms_list)
self.df = pd.read_csv(self.df_path)
self.image_info = collections.defaultdict(dict)
self.df["CategoryId"] = self.df.ClassId.apply(lambda x: str(x).split("_")[0])
temp_df = (
self.df.groupby("ImageId")["EncodedPixels", "CategoryId"]
.agg(lambda x: list(x))
.reset_index()
)
        size_df = self.df.groupby("ImageId")[["Height", "Width"]].mean().reset_index()
temp_df = temp_df.merge(size_df, on="ImageId", how="left")
for index, row in tqdm(temp_df.iterrows(), total=len(temp_df)):
image_id = row["ImageId"]
image_path = os.path.join(self.image_dir, image_id)
self.image_info[index]["image_id"] = image_id
self.image_info[index]["image_path"] = image_path
self.image_info[index]["width"] = self.width
self.image_info[index]["height"] = self.height
self.image_info[index]["labels"] = row["CategoryId"]
self.image_info[index]["orig_height"] = row["Height"]
self.image_info[index]["orig_width"] = row["Width"]
self.image_info[index]["annotations"] = row["EncodedPixels"]
self.dataset_size = len(self.image_info)
def __getitem__(self, index):
# load images ad masks
idx = index
img_path = self.image_info[idx]["image_path"]
img = Image.open(img_path).convert("RGB")
img = img.resize((self.width, self.height), resample=Image.BICUBIC)
image_tensor = self.transform_rgb(img)
info = self.image_info[idx]
mask = np.zeros(
(len(info["annotations"]), self.width, self.height), dtype=np.uint8
)
labels = []
for m, (annotation, label) in enumerate(
zip(info["annotations"], info["labels"])
):
sub_mask = self.rle_decode(
annotation, (info["orig_height"], info["orig_width"])
)
sub_mask = Image.fromarray(sub_mask)
sub_mask = sub_mask.resize(
(self.width, self.height), resample=Image.BICUBIC
)
mask[m, :, :] = sub_mask
labels.append(int(label) + 1)
num_objs = len(labels)
boxes = []
new_labels = []
new_masks = []
for i in range(num_objs):
try:
pos = np.where(mask[i, :, :])
xmin = np.min(pos[1])
xmax = np.max(pos[1])
ymin = np.min(pos[0])
ymax = np.max(pos[0])
if abs(xmax - xmin) >= 20 and abs(ymax - ymin) >= 20:
boxes.append([xmin, ymin, xmax, ymax])
new_labels.append(labels[i])
new_masks.append(mask[i, :, :])
except ValueError:
continue
if len(new_labels) == 0:
boxes.append([0, 0, 20, 20])
new_labels.append(0)
new_masks.append(mask[0, :, :])
nmx = np.zeros((len(new_masks), self.width, self.height), dtype=np.uint8)
for i, n in enumerate(new_masks):
nmx[i, :, :] = n
boxes = torch.as_tensor(boxes, dtype=torch.float32)
labels = torch.as_tensor(new_labels, dtype=torch.int64)
masks = torch.as_tensor(nmx, dtype=torch.uint8)
final_label = np.zeros((self.width, self.height), dtype=np.uint8)
# first_channel = np.zeros((self.width, self.height), dtype=np.uint8)
# second_channel = np.zeros((self.width, self.height), dtype=np.uint8)
# third_channel = np.zeros((self.width, self.height), dtype=np.uint8)
cloth_channel = np.zeros((self.width, self.height), dtype=np.uint8)
upperbody = [0, 1, 2, 3, 4, 5]
lowerbody = [6, 7, 8]
wholebody = [9, 10, 11, 12]
cloth = list(range(46))
for i in range(len(labels)):
# if labels[i] in upperbody:
# first_channel += new_masks[i]
# elif labels[i] in lowerbody:
# second_channel += new_masks[i]
# elif labels[i] in wholebody:
# third_channel += new_masks[i]
if labels[i] in cloth:
cloth_channel += new_masks[i]
# first_channel = (first_channel > 0).astype("uint8")
# second_channel = (second_channel > 0).astype("uint8")
# third_channel = (third_channel > 0).astype("uint8")
cloth_channel = (cloth_channel > 0).astype("uint8")
# final_label = first_channel + second_channel * 2 + third_channel * 3
final_label = cloth_channel
conflict_mask = (final_label <= 1).astype("uint8")
final_label = (conflict_mask) * final_label + (1 - conflict_mask) * 1
target_tensor = torch.as_tensor(final_label, dtype=torch.int64)
return image_tensor, target_tensor
def __len__(self):
return len(self.image_info)
def name(self):
return "AlignedDataset"
def rle_decode(self, mask_rle, shape):
"""
        mask_rle: run-length as string formatted: [start0] [length0] [start1] [length1]... in 1d array
shape: (height,width) of array to return
Returns numpy array according to the shape, 1 - mask, 0 - background
"""
shape = (int(shape[1]), int(shape[0]))
s = mask_rle.split()
# gets starts & lengths 1d arrays
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0::2], s[1::2])]
starts -= 1
# gets ends 1d array
ends = starts + lengths
# creates blank mask image 1d array
img = np.zeros(shape[0] * shape[1], dtype=np.uint8)
        # set the marked pixels
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
# reshape as a 2d mask image
return img.reshape(shape).T # Needed to align to RLE direction
| 38.301676 | 102 | 0.563886 |
4a20032042177bb606f86a8f6a421e5a8b565506 | 9,817 | py | Python | history_generator/civil/treaty.py | ReedOei/History-Generator | 5c1c5df89d355736173cddf9c373b761fa4d1c3b | [
"MIT"
] | 19 | 2017-10-26T17:52:57.000Z | 2022-01-17T11:04:46.000Z | history_generator/civil/treaty.py | Kytuzian/History-Generator | 5c1c5df89d355736173cddf9c373b761fa4d1c3b | [
"MIT"
] | 4 | 2017-11-13T15:54:53.000Z | 2021-02-27T08:02:41.000Z | history_generator/civil/treaty.py | Kytuzian/History-Generator | 5c1c5df89d355736173cddf9c373b761fa4d1c3b | [
"MIT"
] | 5 | 2017-10-30T05:49:43.000Z | 2020-10-06T18:29:42.000Z | import math
import random
from civil.diplomacy import TRADE_CARAVAN_MULTIPLIER, WAR_CITY_MULTIPLIER, TRADE_END_DIVISOR, TREATY_NAMES
import internal.utility as utility
import internal.events as events
from culture.form import Form
class Treaty:
def __init__(self, parent, starting_date, nation_a, nation_b, treaty_type, signing_city=None, treaty_details=None):
if treaty_details is None:
treaty_details = {}
self.parent = parent
self.treaty_id = self.parent.get_next_id('treaty')
self.starting_date = starting_date
self.ending_date = None
self.nation_a = nation_a
self.nation_b = nation_b
self.names = {}
self.last_requested_date = self.starting_date
self.status_changed = False
if signing_city is not None:
self.signing_city = signing_city
else:
capitals = []
if nation_a.has_capital():
capitals.append(nation_a.get_capital())
elif nation_b.has_capital():
capitals.append(nation_b.get_capital())
if len(capitals) > 0:
self.signing_city = random.choice(capitals)
elif len(nation_a.cities + nation_b.cities) > 0:
self.signing_city = random.choice(nation_a.cities + nation_b.cities)
else:
self.signing_city = None
self.treaty_type = treaty_type
if treaty_details != {}:
self.treaty_details = treaty_details
else:
self.treaty_details = {}
if treaty_type == 'trade':
self.treaty_details[nation_a.id] = {}
self.treaty_details[nation_a.id]['caravans_sent'] = 0
self.treaty_details[nation_a.id]['caravans_received'] = 0
self.treaty_details[nation_a.id]['money'] = 0
self.treaty_details[nation_b.id] = {}
self.treaty_details[nation_b.id]['caravans_sent'] = 0
self.treaty_details[nation_b.id]['caravans_received'] = 0
self.treaty_details[nation_b.id]['money'] = 0
elif treaty_type == 'war':
self.treaty_details[nation_a.id] = {}
self.treaty_details[nation_a.id]['troops_killed'] = 0
self.treaty_details[nation_a.id]['troops_lost'] = 0
self.treaty_details[nation_a.id]['cities_conquered'] = 0
self.treaty_details[nation_a.id]['cities_lost'] = 0
self.treaty_details[nation_b.id] = {}
self.treaty_details[nation_b.id]['troops_killed'] = 0
self.treaty_details[nation_b.id]['troops_lost'] = 0
self.treaty_details[nation_b.id]['cities_conquered'] = 0
self.treaty_details[nation_b.id]['cities_lost'] = 0
self.is_current = True
def save(self, path):
res = {'id': self.treaty_id, 'starting_date': self.starting_date, 'ending_date': self.ending_date,
'nation_a': self.nation_a.id, 'nation_b': self.nation_b.id, 'names': self.names,
'last_requested_date': self.last_requested_date, 'status_changed': self.status_changed}
if self.signing_city is not None:
res['signing_city'] = self.signing_city.name
else:
res['signing_city'] = self.signing_city
res['treaty_type'] = self.treaty_type
res['treaty_details'] = self.treaty_details
res['is_current'] = self.is_current
with open(path + self.treaty_id + '.txt', 'w') as f:
f.write(str(res))
def __getitem__(self, key):
return self.treaty_details[key]
def length(self, current_date):
year_len, month_len, day_len = utility.get_time_span_length(self.starting_date, current_date)
# There are only 360 days in a year in our world, because life is so much simpler that way
actual_len = year_len + month_len / 12.0 + day_len / 360.0
# If treaty has only been in effect for 0 time, then let's just say it's been one day.
return max(actual_len, 1.0 / 360.0)
def get_score(self, nation, current_date):
if self.treaty_type == 'trade':
details = self.treaty_details[nation.id]
base_score = details['money'] + (details['caravans_received'] - details['caravans_sent']) * TRADE_CARAVAN_MULTIPLIER
return base_score / self.length(current_date)
elif self.treaty_type == 'war':
details = self.treaty_details[nation.id]
base_score = (details['cities_conquered'] - details['cities_lost']) * WAR_CITY_MULTIPLIER + (details['troops_killed'] - details['troops_lost'])
return base_score
def history_step(self, nation, current_date):
# Treaties must last at least one year to give them time to happen and such
if self.length(current_date) > 1 and self.is_current:
if self.treaty_type == 'trade':
# We can end a trade treaty with the agreement of only one side.
score = self.get_score(nation, current_date)
if score < 0:
end_chance = 0.5 + math.log(-score+1) / TRADE_END_DIVISOR
else:
end_chance = 0.5 - math.log(score+1) / TRADE_END_DIVISOR
# print('trade', end_chance)
val = random.random()
if val < end_chance:
self.end(current_date, ender=nation)
elif self.treaty_type == 'war':
# Both parties have to agree to end a war, obviously
a_score = self.get_score(self.nation_a, current_date)
if a_score < 0:
nation_a_end_chance = 0.5 + math.log(-a_score+1) / TRADE_END_DIVISOR
else:
nation_a_end_chance = 0.5 - math.log(a_score+1) / TRADE_END_DIVISOR
b_score = self.get_score(self.nation_b, current_date)
if b_score < 0:
nation_b_end_chance = 0.5 + math.log(-b_score+1) / TRADE_END_DIVISOR
else:
nation_b_end_chance = 0.5 - math.log(b_score+1) / TRADE_END_DIVISOR
end_chance = nation_a_end_chance * nation_b_end_chance
# print('war', end_chance)
val = random.random()
if val < end_chance:
self.end(current_date)
def get_other_nation(self, nation):
if nation == self.nation_a:
return self.nation_b
else:
return self.nation_a
def get_other_side_details(self, nation):
if nation == self.nation_a:
return self.treaty_details[self.nation_b.id]
else:
return self.treaty_details[self.nation_a.id]
def end(self, current_date, ender=None):
self.is_current = False
self.ending_date = current_date
self.status_changed = True
if self.treaty_type == 'trade':
self.nation_a.trading.remove(self.nation_b)
self.nation_b.trading.remove(self.nation_a)
if ender is None:
ender = self.nation_a
self.parent.event_log.add_event('DiplomacyTradeEnd',
{'nation_a': ender.id,
'nation_b': self.get_other_nation(ender).id},
self.parent.get_current_date())
elif self.treaty_type == 'war':
self.nation_a.at_war.remove(self.nation_b)
self.nation_b.at_war.remove(self.nation_a)
self.parent.event_log.add_event('DiplomacyWarEnd',
{'nation_a': self.nation_a.id,
'nation_b': self.nation_b.id},
self.parent.get_current_date())
def get_treaty_names(self, current_date, requesting_nation):
if not requesting_nation.id in self.names or len(self.names[requesting_nation.id]) == 0:
self.get_treaty_name(current_date, requesting_nation)
return self.names[requesting_nation.id]
def get_treaty_name(self, current_date, requesting_nation):
if self.status_changed or current_date > self.last_requested_date or not requesting_nation.id in self.names or len(self.names[requesting_nation.id]) == 0:
custom_tags = {}
if self.signing_city is not None:
custom_tags['signing_city'] = [self.signing_city.name]
else:
custom_tags['signing_city'] = [str(self.nation_a.name), str(self.nation_b.name)]
custom_tags['signing_year'] = [str(self.starting_date[0])]
custom_tags['nation_a'] = [str(self.nation_a.name)]
custom_tags['nation_b'] = [str(self.nation_b.name)]
if self.ending_date is not None:
treaty_length = utility.get_time_span_length(self.starting_date, self.ending_date)
else:
treaty_length = utility.get_time_span_length(self.starting_date, current_date)
custom_tags['treaty_length_years'] = ['{} years'.format(treaty_length[0])]
custom_tags['treaty_length'] = ['{} years and {} months'.format(treaty_length[0], treaty_length[1])]
gen = Form(TREATY_NAMES[self.treaty_type], custom_tags=custom_tags)
name = gen.generate(nation=requesting_nation)[0]
if not requesting_nation.id in self.names:
self.names[requesting_nation.id] = []
self.names[requesting_nation.id].append(name)
self.last_requested_date = current_date
self.status_changed = False
return self.names[requesting_nation.id][-1] | 43.057018 | 162 | 0.598452 |
4a2003d53498da6fc8d24a553566ec98f6280f53 | 2,197 | py | Python | Neural-Networks/oo_run_singles.py | rupertsmall/machine-learning | 999d2afb1e7409f4221b89f48a5ff60d50bbcb0a | [
"MIT"
] | null | null | null | Neural-Networks/oo_run_singles.py | rupertsmall/machine-learning | 999d2afb1e7409f4221b89f48a5ff60d50bbcb0a | [
"MIT"
] | 3 | 2015-06-29T20:31:46.000Z | 2015-09-02T21:04:23.000Z | Neural-Networks/oo_run_singles.py | rupertsmall/machine-learning | 999d2afb1e7409f4221b89f48a5ff60d50bbcb0a | [
"MIT"
] | null | null | null | #
# optimise a neural network for image recognition
# rupert small, august 2015
#
from numpy import genfromtxt, array  # array is used below for xi
from oo_dr_singles import *
import threading
# initiate data
data = genfromtxt('train2.csv', delimiter=',')
num_cpus = 30 # multi-threading
y_vals = data[:,0] # outputs
x_vals = data[:,1:].T # inputs
alpha = 100*num_cpus**2
beta = .002
base_in = 783
input_layer = 784**2
mangle_upper = 35
xi = array([input_layer,2])
threads = []
# initiate classes
zero = dr_singles(alpha, beta, base_in, mangle_upper, num_cpus, 0)
one = dr_singles(alpha, beta, base_in, mangle_upper, num_cpus, 1)
two = dr_singles(alpha, beta, base_in, mangle_upper, num_cpus, 2)
three = dr_singles(alpha, beta, base_in, mangle_upper, num_cpus, 3)
four = dr_singles(alpha, beta, base_in, mangle_upper, num_cpus, 4)
five = dr_singles(alpha, beta, base_in, mangle_upper, num_cpus, 5)
six = dr_singles(alpha, beta, base_in, mangle_upper, num_cpus, 6)
seven = dr_singles(alpha, beta, base_in, mangle_upper, num_cpus, 7)
eight = dr_singles(alpha, beta, base_in, mangle_upper, num_cpus, 8)
nine = dr_singles(alpha, beta, base_in, mangle_upper, num_cpus, 9)
# run
thr = threading.Thread(target=zero.optimise_network, args=(xi, x_vals, y_vals))
threads.append(thr)
thr = threading.Thread(target=one.optimise_network, args=(xi, x_vals, y_vals))
threads.append(thr)
thr = threading.Thread(target=two.optimise_network, args=(xi, x_vals, y_vals))
threads.append(thr)
thr = threading.Thread(target=three.optimise_network, args=(xi, x_vals, y_vals))
threads.append(thr)
thr = threading.Thread(target=four.optimise_network, args=(xi, x_vals, y_vals))
threads.append(thr)
thr = threading.Thread(target=five.optimise_network, args=(xi, x_vals, y_vals))
threads.append(thr)
thr = threading.Thread(target=six.optimise_network, args=(xi, x_vals, y_vals))
threads.append(thr)
thr = threading.Thread(target=seven.optimise_network, args=(xi, x_vals, y_vals))
threads.append(thr)
thr = threading.Thread(target=eight.optimise_network, args=(xi, x_vals, y_vals))
threads.append(thr)
thr = threading.Thread(target=nine.optimise_network, args=(xi, x_vals, y_vals))
threads.append(thr)
for i in range(0,len(threads)):
threads[i].start()
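# Hedged alternative (added for illustration only): the ten per-digit blocks
# above can be collapsed into a loop, and joining keeps the main thread alive
# until every optimiser has finished:
#
#     nets = [dr_singles(alpha, beta, base_in, mangle_upper, num_cpus, d)
#             for d in range(10)]
#     threads = [threading.Thread(target=n.optimise_network,
#                                 args=(xi, x_vals, y_vals)) for n in nets]
#     for t in threads:
#         t.start()
#     for t in threads:
#         t.join()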
| 36.616667 | 80 | 0.756031 |
4a200499daa0622b79529e1646e15da7da115a61 | 10,017 | py | Python | lib/datasets/imdb.py | pprp/faster-rcnn.Supernova | 583bc9f6efd80d5a7fa88189a1c817d92d6018a6 | [
"MIT"
] | 15 | 2019-04-19T12:40:09.000Z | 2020-06-03T07:56:37.000Z | lib/datasets/imdb.py | Zxl19990529/faster-rcnn.Supernova | 583bc9f6efd80d5a7fa88189a1c817d92d6018a6 | [
"MIT"
] | 2 | 2019-04-19T13:21:44.000Z | 2020-06-03T07:49:31.000Z | lib/datasets/imdb.py | Zxl19990529/faster-rcnn.Supernova | 583bc9f6efd80d5a7fa88189a1c817d92d6018a6 | [
"MIT"
] | 5 | 2019-04-19T13:06:22.000Z | 2021-01-19T03:31:58.000Z | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import PIL
from model.utils.cython_bbox import bbox_overlaps
import numpy as np
import scipy.sparse
from model.utils.config import cfg
import pdb
ROOT_DIR = osp.join(osp.dirname(__file__), '..', '..')
class imdb(object):
"""Image database."""
def __init__(self, name, classes=None):
self._name = name
self._num_classes = 0
if not classes:
self._classes = []
else:
self._classes = classes
self._image_index = []
self._obj_proposer = 'gt'
self._roidb = None
self._roidb_handler = self.default_roidb
# Use this dict for storing dataset specific config options
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
def set_proposal_method(self, method):
method = eval('self.' + method + '_roidb')
self.roidb_handler = method
@property
def roidb(self):
# A roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
return self._roidb
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def image_path_at(self, i):
raise NotImplementedError
def image_id_at(self, i):
raise NotImplementedError
def default_roidb(self):
raise NotImplementedError
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def _get_widths(self):
return [PIL.Image.open(self.image_path_at(i)).size[0]
for i in range(self.num_images)]
def _get_heights(self):
return [PIL.Image.open(self.image_path_at(i)).size[1]
for i in range(self.num_images)]
def append_flipped_images(self):
num_images = self.num_images
widths = self._get_widths()
heights = self._get_heights()#add to get image height
for i in range(num_images):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
#print (self.image_index[i])#print image name
assert (boxes[:,1]<=boxes[:,3]).all()#assert that ymin<=ymax
assert (boxes[:,1]>=0).all()#assert ymin>=0,for 0-based
assert (boxes[:,3]<=heights[i]).all()#assert ymax<height[i],for 0-based
#print(oldx2 ,"<", widths[i])
assert (oldx2<=widths[i]).all()#assert xmax<withd[i],for 0-based
assert (oldx1>=0).all()#assert xmin>=0, for 0-based
#print(oldx2, ">=", oldx1)
assert (oldx2 >= oldx1).all()#assert xmax>=xmin, for 0-based
boxes[:, 0] = widths[i] - oldx2 - 1
boxes[:, 2] = widths[i] - oldx1 - 1
#print ("num_image:%d"%(i))
            # guard against degenerate boxes where the flip produced xmax < xmin
            for b in range(len(boxes)):
                if boxes[b][2] < boxes[b][0]:
                    boxes[b][0] = 0
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'boxes' : boxes,
'gt_overlaps' : self.roidb[i]['gt_overlaps'],
'gt_classes' : self.roidb[i]['gt_classes'],
'flipped' : True}
self.roidb.append(entry)
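    # Worked example (added for illustration): with width = 100 and a 0-based
    # box [xmin, xmax] = [10, 29], the flip above gives
    #     new_xmin = 100 - 29 - 1 = 70,  new_xmax = 100 - 10 - 1 = 89
    # so box width and the xmin <= xmax ordering are preserved.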
def evaluate_recall(self, candidate_boxes=None, thresholds=None,
area='all', limit=None):
"""Evaluate detection proposal recall metrics.
Returns:
results: dictionary of results with keys
'ar': average recall
'recalls': vector recalls at each IoU overlap threshold
'thresholds': vector of IoU overlap thresholds
'gt_overlaps': vector of all ground-truth overlaps
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {'all': 0, 'small': 1, 'medium': 2, 'large': 3,
'96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}
area_ranges = [[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2], # 512-inf
]
assert area in areas, 'unknown area range: {}'.format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = np.zeros(0)
num_pos = 0
for i in range(self.num_images):
# Checking for max_overlaps == 1 avoids including crowd annotations
# (...pretty hacking :/)
max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)
gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &
(max_gt_overlaps == 1))[0]
gt_boxes = self.roidb[i]['boxes'][gt_inds, :]
gt_areas = self.roidb[i]['seg_areas'][gt_inds]
valid_gt_inds = np.where((gt_areas >= area_range[0]) &
(gt_areas <= area_range[1]))[0]
gt_boxes = gt_boxes[valid_gt_inds, :]
num_pos += len(valid_gt_inds)
if candidate_boxes is None:
# If candidate_boxes is not supplied, the default is to use the
# non-ground-truth boxes from this roidb
non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]
boxes = self.roidb[i]['boxes'][non_gt_inds, :]
else:
boxes = candidate_boxes[i]
if boxes.shape[0] == 0:
continue
if limit is not None and boxes.shape[0] > limit:
boxes = boxes[:limit, :]
            overlaps = bbox_overlaps(boxes.astype(float),
                                     gt_boxes.astype(float))
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
for j in range(gt_boxes.shape[0]):
# find which proposal box maximally covers each gt box
argmax_overlaps = overlaps.argmax(axis=0)
# and get the iou amount of coverage for each gt box
max_overlaps = overlaps.max(axis=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ind = max_overlaps.argmax()
gt_ovr = max_overlaps.max()
assert (gt_ovr >= 0)
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert (_gt_overlaps[j] == gt_ovr)
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
gt_overlaps = np.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = np.arange(0.5, 0.95 + 1e-5, step)
recalls = np.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,
'gt_overlaps': gt_overlaps}
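    # Sketch of typical use, assuming `dataset` is a concrete imdb subclass:
    #   stats = dataset.evaluate_recall(area='all', limit=1000)
    #   print(stats['ar'], list(zip(stats['thresholds'], stats['recalls'])))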
def create_roidb_from_box_list(self, box_list, gt_roidb):
assert len(box_list) == self.num_images, \
'Number of boxes must match number of ground-truth images'
roidb = []
for i in range(self.num_images):
boxes = box_list[i]
num_boxes = boxes.shape[0]
overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)
if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:
gt_boxes = gt_roidb[i]['boxes']
gt_classes = gt_roidb[i]['gt_classes']
                gt_overlaps = bbox_overlaps(boxes.astype(float),
                                            gt_boxes.astype(float))
argmaxes = gt_overlaps.argmax(axis=1)
maxes = gt_overlaps.max(axis=1)
I = np.where(maxes > 0)[0]
overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
overlaps = scipy.sparse.csr_matrix(overlaps)
roidb.append({
'boxes': boxes,
'gt_classes': np.zeros((num_boxes,), dtype=np.int32),
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': np.zeros((num_boxes,), dtype=np.float32),
})
return roidb
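    # Here box_list[i] is an (N_i, 4) array of proposal boxes for image i; each
    # returned entry carries zeroed gt_classes/seg_areas and stores per-class
    # overlaps as a sparse CSR matrix under 'gt_overlaps'.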
@staticmethod
def merge_roidbs(a, b):
assert len(a) == len(b)
for i in range(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],
b[i]['gt_classes']))
a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
b[i]['gt_overlaps']])
a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],
b[i]['seg_areas']))
return a
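    # Typical call site in py-faster-rcnn-style pipelines (a sketch):
    #   roidb = imdb.merge_roidbs(gt_roidb, proposal_roidb)
    # which stacks proposal boxes/overlaps onto each image's ground-truth entry.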
def competition_mode(self, on):
"""Turn competition mode on or off."""
pass
| 35.271127 | 81 | 0.5881 |
4a2006ccc75ffc2e814775ea21ecb159568abea9 | 92 | py | Python | peakingduck/__init__.py | fispact/peakingduck | 748cfa2341710d99083db950a2d66ffb139eecf2 | [
"MIT"
] | 5 | 2020-12-26T17:49:24.000Z | 2021-05-21T03:10:45.000Z | peakingduck/__init__.py | fispact/peakingduck | 748cfa2341710d99083db950a2d66ffb139eecf2 | [
"MIT"
] | 18 | 2020-01-28T20:38:48.000Z | 2020-03-06T16:34:48.000Z | peakingduck/__init__.py | thomasms/peakingduck | 748cfa2341710d99083db950a2d66ffb139eecf2 | [
"MIT"
] | 5 | 2020-03-16T16:21:35.000Z | 2022-02-13T09:09:40.000Z | import peakingduck.util as util
import peakingduck.core as core
import peakingduck.io as io
| 23 | 31 | 0.836957 |
4a20079426360beaaa90f313d0215afb2699ed16 | 3,726 | py | Python | rllib/examples/custom_input_api.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | rllib/examples/custom_input_api.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | 41 | 2021-09-21T01:13:48.000Z | 2022-03-19T07:12:22.000Z | rllib/examples/custom_input_api.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | 1 | 2019-09-24T16:24:49.000Z | 2019-09-24T16:24:49.000Z | """Example of creating a custom input api
Custom input apis are useful when your data source is in a custom format or
when it is necessary to use an external data loading mechanism.
In this example, we train an rl agent on user specified input data.
Instead of using the built in JsonReader, we will create our own custom input
api, and show how to pass config arguments to it.
To train CQL on the pendulum environment:
$ python custom_input_api.py --input-files=../tests/data/pendulum/enormous.zip
"""
import argparse
import os
import ray
from ray import tune
from ray.rllib.offline import JsonReader, ShuffledInput, IOContext, InputReader
from ray.tune.registry import register_input
parser = argparse.ArgumentParser()
parser.add_argument(
"--run", type=str, default="CQL", help="The RLlib-registered algorithm to use."
)
parser.add_argument(
"--framework",
choices=["tf", "tf2", "tfe", "torch"],
default="tf",
help="The DL framework specifier.",
)
parser.add_argument("--stop-iters", type=int, default=100)
parser.add_argument(
"--input-files",
type=str,
default=os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../tests/data/pendulum/small.json"
),
)
class CustomJsonReader(JsonReader):
"""
Example custom InputReader implementation (extended from JsonReader).
This gets wrapped in ShuffledInput to comply with offline rl algorithms.
"""
def __init__(self, ioctx: IOContext):
"""
The constructor must take an IOContext to be used in the input config.
Args:
ioctx: use this to access the `input_config` arguments.
"""
super().__init__(ioctx.input_config["input_files"], ioctx)
def input_creator(ioctx: IOContext) -> InputReader:
"""
The input creator method can be used in the input registry or set as the
config["input"] parameter.
Args:
ioctx: use this to access the `input_config` arguments.
Returns:
instance of ShuffledInput to work with some offline rl algorithms
"""
return ShuffledInput(CustomJsonReader(ioctx))
if __name__ == "__main__":
ray.init()
args = parser.parse_args()
# make absolute path because relative path looks in result directory
args.input_files = os.path.abspath(args.input_files)
# we register our custom input creator with this convenient function
register_input("custom_input", input_creator)
# config modified from rllib/tuned_examples/cql/pendulum-cql.yaml
config = {
"env": "Pendulum-v1",
# we can either use the tune registry, class path, or direct function
# to connect our input api.
"input": "custom_input",
# "input": "ray.rllib.examples.custom_input_api.CustomJsonReader",
# "input": input_creator,
# this gets passed to the IOContext
"input_config": {
"input_files": args.input_files,
},
"framework": args.framework,
"actions_in_input_normalized": True,
"clip_actions": True,
"twin_q": True,
"train_batch_size": 2000,
"learning_starts": 0,
"bc_iters": 100,
"metrics_num_episodes_for_smoothing": 5,
"evaluation_interval": 1,
"evaluation_num_workers": 2,
"evaluation_duration": 10,
"evaluation_parallel_to_training": True,
"evaluation_config": {
"input": "sampler",
"explore": False,
},
}
stop = {
"training_iteration": args.stop_iters,
"evaluation/episode_reward_mean": -600,
}
analysis = tune.run(args.run, config=config, stop=stop, verbose=1)
info = analysis.results[next(iter(analysis.results))]["info"]
| 31.576271 | 87 | 0.671229 |
4a2007c12ab2a02692e3171472cf6397eb33257b | 7,334 | py | Python | gencapdefs.py | erincerys/ergo | 0aeedcdcccb5348d8eedb5faa6a0536d93ca3ae3 | [
"MIT"
] | 1,122 | 2017-06-15T05:44:52.000Z | 2021-05-26T16:27:43.000Z | gencapdefs.py | erincerys/ergo | 0aeedcdcccb5348d8eedb5faa6a0536d93ca3ae3 | [
"MIT"
] | 1,031 | 2017-06-18T13:57:51.000Z | 2021-05-26T19:51:37.000Z | gencapdefs.py | erincerys/ergo | 0aeedcdcccb5348d8eedb5faa6a0536d93ca3ae3 | [
"MIT"
] | 113 | 2017-06-21T18:32:53.000Z | 2021-05-26T13:12:46.000Z | #!/usr/bin/env python3
"""
Updates the capability definitions at irc/caps/defs.go
To add a capability, add it to the CAPDEFS list below,
then run `make capdefs` from the project root.
"""
import io
import subprocess
import sys
from collections import namedtuple
CapDef = namedtuple("CapDef", ['identifier', 'name', 'url', 'standard'])
CAPDEFS = [
CapDef(
identifier="AccountNotify",
name="account-notify",
url="https://ircv3.net/specs/extensions/account-notify-3.1.html",
standard="IRCv3",
),
CapDef(
identifier="AccountTag",
name="account-tag",
url="https://ircv3.net/specs/extensions/account-tag-3.2.html",
standard="IRCv3",
),
CapDef(
identifier="AwayNotify",
name="away-notify",
url="https://ircv3.net/specs/extensions/away-notify-3.1.html",
standard="IRCv3",
),
CapDef(
identifier="Batch",
name="batch",
url="https://ircv3.net/specs/extensions/batch-3.2.html",
standard="IRCv3",
),
CapDef(
identifier="CapNotify",
name="cap-notify",
url="https://ircv3.net/specs/extensions/cap-notify-3.2.html",
standard="IRCv3",
),
CapDef(
identifier="ChgHost",
name="chghost",
url="https://ircv3.net/specs/extensions/chghost-3.2.html",
standard="IRCv3",
),
CapDef(
identifier="EchoMessage",
name="echo-message",
url="https://ircv3.net/specs/extensions/echo-message-3.2.html",
standard="IRCv3",
),
CapDef(
identifier="ExtendedJoin",
name="extended-join",
url="https://ircv3.net/specs/extensions/extended-join-3.1.html",
standard="IRCv3",
),
CapDef(
identifier="InviteNotify",
name="invite-notify",
url="https://ircv3.net/specs/extensions/invite-notify-3.2.html",
standard="IRCv3",
),
CapDef(
identifier="LabeledResponse",
name="labeled-response",
url="https://ircv3.net/specs/extensions/labeled-response.html",
standard="IRCv3",
),
CapDef(
identifier="Languages",
name="draft/languages",
url="https://gist.github.com/DanielOaks/8126122f74b26012a3de37db80e4e0c6",
standard="proposed IRCv3",
),
CapDef(
identifier="MessageTags",
name="message-tags",
url="https://ircv3.net/specs/extensions/message-tags.html",
standard="IRCv3",
),
CapDef(
identifier="MultiPrefix",
name="multi-prefix",
url="https://ircv3.net/specs/extensions/multi-prefix-3.1.html",
standard="IRCv3",
),
CapDef(
identifier="Relaymsg",
name="draft/relaymsg",
url="https://github.com/ircv3/ircv3-specifications/pull/417",
standard="proposed IRCv3",
),
CapDef(
identifier="ChannelRename",
name="draft/channel-rename",
url="https://ircv3.net/specs/extensions/channel-rename",
standard="draft IRCv3",
),
CapDef(
identifier="SASL",
name="sasl",
url="https://ircv3.net/specs/extensions/sasl-3.2.html",
standard="IRCv3",
),
CapDef(
identifier="ServerTime",
name="server-time",
url="https://ircv3.net/specs/extensions/server-time-3.2.html",
standard="IRCv3",
),
CapDef(
identifier="SetName",
name="setname",
url="https://ircv3.net/specs/extensions/setname.html",
standard="IRCv3",
),
CapDef(
identifier="STS",
name="sts",
url="https://ircv3.net/specs/extensions/sts.html",
standard="IRCv3",
),
CapDef(
identifier="UserhostInNames",
name="userhost-in-names",
url="https://ircv3.net/specs/extensions/userhost-in-names-3.2.html",
standard="IRCv3",
),
CapDef(
identifier="ZNCSelfMessage",
name="znc.in/self-message",
url="https://wiki.znc.in/Query_buffers",
standard="ZNC vendor",
),
CapDef(
identifier="EventPlayback",
name="draft/event-playback",
url="https://github.com/ircv3/ircv3-specifications/pull/362",
standard="proposed IRCv3",
),
CapDef(
identifier="ZNCPlayback",
name="znc.in/playback",
url="https://wiki.znc.in/Playback",
standard="ZNC vendor",
),
CapDef(
identifier="Nope",
name="ergo.chat/nope",
url="https://ergo.chat/nope",
standard="Ergo vendor",
),
CapDef(
identifier="Multiline",
name="draft/multiline",
url="https://github.com/ircv3/ircv3-specifications/pull/398",
standard="proposed IRCv3",
),
CapDef(
identifier="Chathistory",
name="draft/chathistory",
url="https://github.com/ircv3/ircv3-specifications/pull/393",
standard="proposed IRCv3",
),
CapDef(
identifier="AccountRegistration",
name="draft/account-registration",
url="https://github.com/ircv3/ircv3-specifications/pull/435",
standard="draft IRCv3",
),
CapDef(
identifier="ExtendedMonitor",
name="draft/extended-monitor",
url="https://github.com/ircv3/ircv3-specifications/pull/466",
standard="draft IRCv3",
),
]
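# To register a new capability, append one more CapDef in the same style and
# re-run `make capdefs`. A hypothetical example entry:
#   CapDef(
#       identifier="ReadMarker",
#       name="draft/read-marker",
#       url="https://ircv3.net/specs/extensions/read-marker",
#       standard="draft IRCv3",
#   ),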
def validate_defs():
CAPDEFS.sort(key=lambda d: d.name)
numCaps = len(CAPDEFS)
numNames = len(set(capdef.name for capdef in CAPDEFS))
if numCaps != numNames:
raise Exception("defs must have unique names, but found duplicates")
numIdentifiers = len(set(capdef.identifier for capdef in CAPDEFS))
if numCaps != numIdentifiers:
raise Exception("defs must have unique identifiers, but found duplicates")
def main():
validate_defs()
output = io.StringIO()
print("""
package caps
/*
WARNING: this file is autogenerated by `make capdefs`
DO NOT EDIT MANUALLY.
*/
""", file=output)
numCapabs = len(CAPDEFS)
bitsetLen = numCapabs // 32
if numCapabs % 32 > 0:
bitsetLen += 1
print ("""
const (
// number of recognized capabilities:
numCapabs = %d
// length of the uint64 array that represents the bitset:
bitsetLen = %d
)
""" % (numCapabs, bitsetLen), file=output)
print("const (", file=output)
for capdef in CAPDEFS:
print("// %s is the %s capability named \"%s\":" % (capdef.identifier, capdef.standard, capdef.name), file=output)
print("// %s" % (capdef.url,), file=output)
print("%s Capability = iota" % (capdef.identifier,), file=output)
print(file=output)
print(")", file=output)
print("// `capabilityNames[capab]` is the string name of the capability `capab`", file=output)
print("""var ( capabilityNames = [numCapabs]string{""", file=output)
for capdef in CAPDEFS:
print("\"%s\"," % (capdef.name,), file=output)
print("})", file=output)
# run the generated code through `gofmt -s`, which will print it to stdout
gofmt = subprocess.Popen(['gofmt', '-s'], stdin=subprocess.PIPE)
gofmt.communicate(input=output.getvalue().encode('utf-8'))
if gofmt.poll() != 0:
print(output.getvalue())
raise Exception("gofmt failed")
return 0
if __name__ == '__main__':
sys.exit(main())
| 29.336 | 122 | 0.600082 |
4a20094ae3d7323c4517e1044da5cf25ac752cd0 | 3,379 | py | Python | render_road_multi.py | benchoi93/AdvSpdRL | 2cf6ac7934ad48b85f1e90df1d5015853a09ac5c | [
"MIT"
] | null | null | null | render_road_multi.py | benchoi93/AdvSpdRL | 2cf6ac7934ad48b85f1e90df1d5015853a09ac5c | [
"MIT"
] | null | null | null | render_road_multi.py | benchoi93/AdvSpdRL | 2cf6ac7934ad48b85f1e90df1d5015853a09ac5c | [
"MIT"
] | null | null | null | from pathlib import Path
from util.plotutil import info_graph, info_graph_separate, info_graph_detail, make_gif
from stable_baselines3.common.callbacks import CheckpointCallback
from stable_baselines3 import PPO, SAC, DDPG, A2C, DQN, TD3
import numpy as np
from PIL import Image
from rl_env.adv_spd_env_road_multi import AdvSpdEnvRoadMulti
import time
import matplotlib.pyplot as plt
import pickle
import glob
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
modelname = 'PPO'
cuda = '0'
i = 0
# coef_power = 0.01
# param = 'AdvSpdRL_DDPG_3500000_steps'
# for cuda in range(4):
# cuda = str(cuda)
# for i in range(100):
# try:
env: AdvSpdEnvRoadMulti = pickle.load(open(os.path.join('params', f'{modelname}{cuda}', 'env.pkl'), 'rb'))
# env = AdvSpdEnvRoadMulti(num_signal=3, num_action_unit=3)
model = globals()[modelname]("MlpPolicy", env, verbose=1, device='cpu')
list_of_files = glob.glob(os.path.join('params', f'{modelname}{cuda}/*.zip'))
latest_file = max(list_of_files, key=os.path.getmtime)
model = model.load(latest_file, device='cpu')
path = Path(f'simulate_gif/{modelname}{cuda}/')
if not path.exists():
path.mkdir()
ob = env.reset()
pickle.dump(env, open(f'simulate_gif/{modelname}{cuda}/env_{i}.pkl', 'wb'))
episode_over = False
combine = True
cnt = 0
print("-------------------------------------")
while not episode_over:
action, _ = model.predict(ob)
print(f"{cnt=} || {action=}")
cnt += 1
ob, reward, episode_over, info = env.step(action)
env1 = env
print(env.timestep/10)
env = pickle.load(open(f'simulate_gif/{modelname}{cuda}/env_{i}.pkl', 'rb'))
episode_over = False
combine = True
cnt = 0
print("-------------------------------------")
while not episode_over:
action = np.array([9])
print(f"{cnt=} || {action=}")
cnt += 1
ob, reward, episode_over, info = env.step(action)
env2 = env
print(env.timestep/10)
print("-------------------------------------")
env_list = [env1]
# env_list = [env1, env2]
info_graph(env_list, [env.vehicle.veh_info[:env.timestep+1] for env in env_list], check_finish=True, path=f'simulate_gif/{modelname}{cuda}/infograph_base_{i}.png')
# info_graph_separate(env_list, [env.vehicle.veh_info[:env.timestep+1] for env in env_list], path=f'simulate_gif/{modelname}{cuda}/infograph_separate_{i}.png')
info_graph_detail(env_list, [env.vehicle.veh_info[:env.timestep+1] for env in env_list], True, path=f'simulate_gif/{modelname}{cuda}/infograph_detail_separate_{i}.png')
info_graph_detail(env_list, [env.vehicle.veh_info[:env.timestep+1] for env in env_list], False, path=f'simulate_gif/{modelname}{cuda}/infograph_detail_nonseparate_{i}.png')
render_gif = True
if render_gif == True:
env_num = 0
for env in env_list:
print("@ env{}/ Making Car-moving gif".format(env_num))
make_gif(env, env_num)
env_num += 1
print("-------------------------------------")
env_num = 0
for env in env_list:
print("@ env{}/ Test information".format(env_num))
env_num += 1
print("reward coef: ", env.reward_coef)
print("reward: {}".format(np.round((env.vehicle.veh_info[:, 4]).sum(), 3)))
# print("execution time: {}".format(np.round(finish-start), 5))
print("# of episodes: {}".format(env.timestep))
# print("execution time per episode: {}".format((finish-start)/env.timestep))
print('-------------------------------------')
| 33.455446 | 172 | 0.666765 |
4a2009fd16d6c0c196e1a8739c25ff6d5b6536e4 | 723 | py | Python | pyfr/plugins/base.py | tjcorona/PyFR | a72b41580043bb001e5a9e6bb79a0e305d48e052 | [
"BSD-3-Clause"
] | null | null | null | pyfr/plugins/base.py | tjcorona/PyFR | a72b41580043bb001e5a9e6bb79a0e305d48e052 | [
"BSD-3-Clause"
] | null | null | null | pyfr/plugins/base.py | tjcorona/PyFR | a72b41580043bb001e5a9e6bb79a0e305d48e052 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
class BasePlugin(object, metaclass=ABCMeta):
name = None
systems = None
@abstractmethod
def __init__(self, intg, cfgsect, suffix=None):
self.cfg = intg.cfg
self.cfgsect = cfgsect
self.suffix = suffix
self.ndims = intg.system.ndims
self.nvars = intg.system.nvars
# Check that we support this particular system
if not ('*' in self.systems or intg.system.name in self.systems):
raise RuntimeError('System {0} not supported by plugin {1}'
.format(intg.system.name, self.name))
@abstractmethod
def __call__(self, intg):
pass
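# Minimal sketch of a concrete plugin built on this interface (hypothetical,
# not part of PyFR itself):
#
#   class StepEchoPlugin(BasePlugin):
#       name = 'stepecho'
#       systems = ['*']
#
#       def __init__(self, intg, cfgsect, suffix=None):
#           super().__init__(intg, cfgsect, suffix)
#
#       def __call__(self, intg):
#           pass  # inspect the integrator once per invocation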
| 25.821429 | 73 | 0.609959 |
4a200b1882a662996aef50799aea64cc6cae0379 | 2,077 | py | Python | Linux-Operation0605/app/core/constants.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | Linux-Operation0605/app/core/constants.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | 18 | 2020-06-05T18:17:40.000Z | 2022-03-11T23:25:21.000Z | Linux-Operation0605/app/core/constants.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Constants. The label strings are user-facing Chinese text and are kept as-is;
English glosses are given in the trailing comments.
'''
USER_TYPE = (
    ('systemadmin', u'系统管理员'),  # system administrator
    ('superadmin', u'超管'),  # super administrator
    ('deptadmin', u'部门管理员'),  # department administrator
    ('domainadmin', u'域名管理员'),  # domain administrator
)
PROXY_CONFIG_DISABLED = (
    ("-1", u'启用'),  # enabled
    ("1", u'禁用'),  # disabled
)
DISABLED_STATUS = (
    ("-1", u'启用'),  # enabled
    ("1", u'禁用'),  # disabled
)
PROXY_SERVER_STATUS = (
    ('', u''),
    ('unconnect', u'未连接'),  # not connected
    ('connected', u'已连接'),  # connected
    ('disconnected', u'连接断开'),  # connection lost
    ('conn_error', u'连接出错'),  # connection error
)
PROXY_MOVE_TYPE = (
    ("from", u'迁入'),  # migrate in
    ("to", u'迁出'),  # migrate out
)
PROXY_MOVE_STATUS = (
    ("init", u'初始化'),  # initializing
    ("wait", u'等待同步'),  # waiting to sync
    ("sync", u'正在同步'),  # syncing
    ("accept", u'已接收'),  # received
    ("ready", u'等待删帐号'),  # waiting for account deletion
    ("backup", u'正在备份'),  # backing up
    ("ask_delete", u'开始删除帐号'),  # starting account deletion
    ("deleted", u'已删除帐号'),  # account deleted
    ("create", u'等待目标服创建帐号'),  # waiting for target server to create the account
    ("done", u'成功创建'),  # created successfully
    ("imap_recv", u'正在通过imap接收邮件'),  # receiving mail via IMAP
    ("finish", u'已完结'),  # finished
    ("unvalid", u'出错'),  # error
)
CORE_ALIS_TYPE = (
    ('mailbox', u'邮箱'),  # mailbox
    ('domain', u'域名'),  # domain
    ('system', u'系统'),  # system
    ('review', u'审核'),  # review
)
BLACK_WHITE_OPTOR = (
    ('user', u'普通用户'),  # regular user
    ('sys', u'管理员'),  # administrator
)
BLACK_WHITE_TYPE = (
    ('recv', u'接收'),  # receive
    ('send', u'发送'),  # send
)
ATTR_TYPR = (
    ('webmail', u'webmail'),
    ('system', u'system'),
)
MONITOR_LISTEN_TYPE = (
    (u'recipient', u'收信监控'),  # inbound-mail monitoring
    (u'sender', u'发信监控'),  # outbound-mail monitoring
)
MONITOR_TARGET_TYPE = (
    (u'*', u'所有'),  # all
    (u'in', u'接收'),  # inbound
    (u'out', u'外发'),  # outbound
)
MONITOR_MAILMOVE_SELECT = (
    (u'1', u'监控'),  # monitor
    (u'-1', u'不监控'),  # do not monitor
)
MAILBOX_SEND_PERMIT = (
    (u"-1", u"不限制邮件发送"),  # no restriction on sending
    (u"1", u"禁止发送所有邮件"),  # forbid sending any mail
    (u"2", u"只发送本地域邮件"),  # only send within the local domain
    (u"3", u"可发送指定外域邮件"),  # may send to specified external domains
    (u"4", u"可发送本地所有域邮件"),  # may send to all local domains
)
MAILBOX_RECV_PERMIT = (
    (u"-1", u"不限制邮件接收"),  # no restriction on receiving
    (u"1", u"禁止接收所有邮件"),  # forbid receiving any mail
    (u"2", u"只接收本地域邮件"),  # only receive from the local domain
    (u"3", u"可接收指定外域邮件"),  # may receive from specified external domains
    (u"4", u"可接收本地所有域邮件"),  # may receive from all local domains
)
MAILBOX_LIMIT_LOGIN = (
    (u"-1", u"不限制登录方式"),  # no login-method restriction
    (u"1", u"禁止网页登录"),  # forbid web login
)
MAILBOX_CHANGE_PWD = (
    (u"-1", u"不修改"),  # do not change
    (u"1", u"修改"),  # change
    (u"2", u"修改并禁用帐号"),  # change and disable the account
)
MAILBOX_ENABLE = (
    (-1, u"禁用"),  # disabled
    (1, u"开启"),  # enabled
)
USER_SHOW = (
    (-1, u"不显示"),  # hidden
    (1, u"显示"),  # shown
)
GENDER = (
    ('male', u"男"),  # male
    ('female', u"女"),  # female
)
| 15.854962 | 35 | 0.466057 |
4a200b3d26120d30cf8f8e36c20af5d92049a97e | 109 | py | Python | transcription_analysis/apps.py | dighr/AudioAnalysis | b22d1afd00a1834dbd5e2a87418e8299a4fdd29d | [
"MIT"
] | 4 | 2018-12-13T04:36:40.000Z | 2022-02-27T23:52:37.000Z | transcription_analysis/apps.py | dighr/AudioAnalysis | b22d1afd00a1834dbd5e2a87418e8299a4fdd29d | [
"MIT"
] | 40 | 2018-11-07T19:46:34.000Z | 2020-04-04T16:42:48.000Z | transcription_analysis/apps.py | dighr/AudioAnalysis | b22d1afd00a1834dbd5e2a87418e8299a4fdd29d | [
"MIT"
] | 1 | 2020-09-12T14:49:01.000Z | 2020-09-12T14:49:01.000Z | from django.apps import AppConfig
class TextAnalysisConfig(AppConfig):
name = 'transcription_analysis'
| 18.166667 | 36 | 0.798165 |
4a200b76c351d447ff67ed7ae25acad3e9743083 | 1,324 | py | Python | gui/_load_weights.py | parot-99/COVID-19-Warning-System | 41c53f41db3a0e3b78c6b934c2964a96c4c2f635 | [
"BSD-3-Clause"
] | 2 | 2020-08-17T19:33:38.000Z | 2020-08-26T18:39:03.000Z | gui/_load_weights.py | parot-99/COVID-19-Warning-System | 41c53f41db3a0e3b78c6b934c2964a96c4c2f635 | [
"BSD-3-Clause"
] | null | null | null | gui/_load_weights.py | parot-99/COVID-19-Warning-System | 41c53f41db3a0e3b78c6b934c2964a96c4c2f635 | [
"BSD-3-Clause"
] | 1 | 2021-01-04T17:47:22.000Z | 2021-01-04T17:47:22.000Z | from PyQt5.QtWidgets import QFileDialog
from yolo.model.yolo import Yolo
def load_mask_weights(self):
model = (
Yolo(2)
if self.detection_model == "yolo"
else Yolo(2, tiny=True)
)
mask_weights_path = QFileDialog.getOpenFileName()[0]
if mask_weights_path == "":
return
self.maskPathLabel.setText(f"Path: {mask_weights_path}")
self.maskLoadedLabel.setText("Loading weights...")
model.load_weights(mask_weights_path)
self.mask_detector = model.get_graph()
self.maskLoadedLabel.setText("Weights loaded!")
self.maskLoadedLabel.setStyleSheet(
'font: 12pt "MS Shell Dlg 2";\n' "color: #50fa7b;"
)
def load_distance_weights(self):
model = (
Yolo(80)
if self.detection_model == "yolo"
else Yolo(80, tiny=True)
)
distance_weights_path = QFileDialog.getOpenFileName()[0]
if distance_weights_path == "":
return
self.distancePathLabel.setText(f"Path: {distance_weights_path}")
self.distanceLoadedLabel.setText("Loading weights...")
model.load_weights(distance_weights_path)
self.distance_detector = model.get_graph()
self.distanceLoadedLabel.setText("Weights loaded!")
self.distanceLoadedLabel.setStyleSheet(
'font: 12pt "MS Shell Dlg 2";\n' "color: #50fa7b;"
) | 30.090909 | 68 | 0.679758 |
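# Note: these are module-level functions that take `self`; presumably they are
# bound onto the Qt window class elsewhere, e.g. (hypothetical):
#   MainWindow.load_mask_weights = load_mask_weights
#   MainWindow.load_distance_weights = load_distance_weights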
4a200b7dd4aea2426357ae1d333d95acef61b318 | 71,499 | py | Python | rowboat/plugins/admin.py | elderlabs/jetski | aa04d09fa41ec924eea5e2aee57135c2b7384a83 | [
"MIT"
] | null | null | null | rowboat/plugins/admin.py | elderlabs/jetski | aa04d09fa41ec924eea5e2aee57135c2b7384a83 | [
"MIT"
] | null | null | null | rowboat/plugins/admin.py | elderlabs/jetski | aa04d09fa41ec924eea5e2aee57135c2b7384a83 | [
"MIT"
] | null | null | null | import re
import csv
import time
import gevent
import humanize
import operator
from io import StringIO
from peewee import fn
from holster.emitter import Priority
from functools import reduce
from fuzzywuzzy import fuzz
from datetime import datetime, timedelta
from disco.bot import CommandLevels
from disco.types.user import User as DiscoUser
from disco.types.channel import Channel as DiscoChannel
from disco.types.guild import GuildMember
from disco.types.message import MessageTable, MessageEmbed, MessageEmbedField, MessageEmbedThumbnail
from disco.types.permissions import Permissions
from disco.util.functional import chunks
from disco.util.sanitize import S
from disco.api.http import APIException
from rowboat.plugins import RowboatPlugin as Plugin, CommandFail, CommandSuccess
from rowboat.util.timing import Eventual
from rowboat.util.images import get_dominant_colors_user
from rowboat.util.input import parse_duration, humanize_duration
from rowboat.redis import rdb
from rowboat.types import Field, DictField, ListField, snowflake, SlottedModel
from rowboat.types.plugin import PluginConfig
from rowboat.plugins.modlog import Actions
from rowboat.models.user import User, Infraction
from rowboat.models.guild import GuildMemberBackup, GuildBan, GuildEmoji, GuildVoiceSession
from rowboat.models.message import Message, Reaction, MessageArchive
from rowboat.constants import (
GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID, GREEN_TICK_EMOJI, RED_TICK_EMOJI
)
EMOJI_RE = re.compile(r'<:[a-zA-Z0-9_]+:([0-9]+)>')
CUSTOM_EMOJI_STATS_SERVER_SQL = """
SELECT gm.emoji_id, gm.name, count(*) FROM guild_emojis gm
JOIN messages m ON m.emojis @> ARRAY[gm.emoji_id]
WHERE gm.deleted=false AND gm.guild_id={guild} AND m.guild_id={guild}
GROUP BY 1, 2
ORDER BY 3 {}
LIMIT 30
"""
CUSTOM_EMOJI_STATS_GLOBAL_SQL = """
SELECT gm.emoji_id, gm.name, count(*) FROM guild_emojis gm
JOIN messages m ON m.emojis @> ARRAY[gm.emoji_id]
WHERE gm.deleted=false AND gm.guild_id={guild}
GROUP BY 1, 2
ORDER BY 3 {}
LIMIT 30
"""
def clamp(string, size):
if len(string) > size:
return string[:size] + '…'
return string
def maybe_string(obj, exists, notexists, **kwargs):
if obj:
return exists.format(o=obj, **kwargs)
return notexists.format(**kwargs)
class PersistConfig(SlottedModel):
roles = Field(bool, default=False)
nickname = Field(bool, default=False)
voice = Field(bool, default=False)
role_ids = ListField(snowflake, default=[])
class AdminConfig(PluginConfig):
confirm_actions = Field(bool, default=True)
# Role saving information
persist = Field(PersistConfig, default=None)
# Aliases to roles, can be used in place of IDs in commands
role_aliases = DictField(str, snowflake)
# Group roles can be joined/left by any user
group_roles = DictField(lambda value: value.lower(), snowflake)
group_confirm_reactions = Field(bool, default=False)
# Locked roles cannot be changed unless they are unlocked w/ command
locked_roles = ListField(snowflake)
# The mute role
mute_role = Field(snowflake, default=None)
reason_edit_level = Field(int, default=int(CommandLevels.ADMIN))
# Infraction DMs
infraction_dms = Field(bool, default=False)
dms_include_mod = Field(bool, default=True)
def infraction_message(event, user, action, server, moderator, reason, expires='Never', auto=False):
infractions = {
'warn': {
'name': 'Warning',
'context': 'warned'
},
'mute': {
'name': 'Mute',
'context': 'muted'
},
'tempmute': {
'name': 'Tempmute',
'context': 'temporarily muted'
},
'kick': {
'name': 'Kick',
'context': 'kicked'
},
'ban': {
'name': 'Ban',
'context': 'banned'
},
'tempban': {
'name': 'Tempban',
'context': 'temporarily banned'
}
}
if not reason:
reason = '*Not specified*'
if expires != 'Never':
# Hacky time humanizer
now = datetime.utcnow()
diff_delta = (expires - now)
        diff_delta = (expires - now)
        diff = int(diff_delta.total_seconds())
        minutes, seconds = divmod(diff, 60)
        hours, minutes = divmod(minutes, 60)
        days, hours = divmod(hours, 24)
        weeks, days = divmod(days, 7)
unit_strs = ['week', 'day', 'hour', 'minute', 'second']
expires = ''
for x in range(0, 5):
if units[x] == 0:
continue
else:
if units[x] > 1:
expires += '{} {}s, '.format(units[x], unit_strs[x])
else:
expires += '{} {}, '.format(units[x], unit_strs[x])
expires = expires[:-2]
embed = MessageEmbed()
embed.title = '{}'.format(server)
embed.set_footer(text='This is an automated message. Contact the moderators for more information.')
embed.timestamp = datetime.utcnow().isoformat()
if action in ('mute', 'tempmute', 'warn'):
embed.color = 0xfdfd96
elif action == 'kick':
embed.color = 0xffb347
else:
embed.color = 0xff6961
embed.add_field(name='Action', value=infractions[action]['name'], inline=True)
# Also hacky method to avoid SpamPlugin attribute errors on dms_include_mod
if auto:
embed.add_field(name='Moderator', value=moderator, inline=True)
else:
if event.config.dms_include_mod:
embed.add_field(name='Moderator', value=moderator, inline=True)
embed.add_field(name='Reason', value=reason, inline=True)
embed.add_field(name='Expires in', value=expires, inline=True)
return infractions, embed
@Plugin.with_config(AdminConfig)
class AdminPlugin(Plugin):
def load(self, ctx):
super(AdminPlugin, self).load(ctx)
self.cleans = {}
self.inf_task = Eventual(self.clear_infractions)
self.spawn_later(5, self.queue_infractions)
self.unlocked_roles = {}
self.role_debounces = {}
def send_infraction_dm(self, event, user, action, server, moderator, reason, expires='Never'):
if not event.config.infraction_dms:
return
infractions, embed = infraction_message(event, user, action, server, moderator, reason, expires)
try:
dm = self.client.api.users_me_dms_create(user)
if action in ('kick', 'ban', 'tempban'):
preposition = 'from'
else:
preposition = 'in'
return dm.send_message('You\'ve been {} {} **{}**.'.format(infractions[action]['context'], preposition, server), embed=embed)
except:
raise
def queue_infractions(self):
next_infraction = list(Infraction.select().where(
Infraction.active &
(~(Infraction.expires_at >> None))
).order_by(Infraction.expires_at.asc()).limit(1))
if not next_infraction:
self.log.info('[INF] no infractions to wait for')
return
self.log.info('[INF] waiting until %s for %s', next_infraction[0].expires_at, next_infraction[0].id)
self.inf_task.set_next_schedule(next_infraction[0].expires_at)
def clear_infractions(self):
expired = list(Infraction.select().where(
Infraction.active &
(Infraction.expires_at < datetime.utcnow())
))
self.log.info('[INF] attempting to clear %s expired infractions', len(expired))
for item in expired:
guild = self.bot.client.state.guilds.get(item.guild_id)
if not guild:
self.log.warning('[INF] failed to clear infraction %s, no guild exists', item.id)
continue
# TODO: hacky
type_ = {i.index: i for i in Infraction.Types.attrs}[item.type_]
if type_ == Infraction.Types.TEMPBAN:
self.call(
'ModLogPlugin.create_debounce',
guild.id,
['GuildBanRemove'],
user_id=item.user_id,
)
guild.delete_ban(item.user_id)
# TODO: perhaps join on users above and use username from db
self.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_TEMPBAN_EXPIRE,
guild.id,
user_id=item.user_id,
user=(self.bot.client.state.users.get(item.user_id) or item.user_id),
inf=item
)
elif type_ == Infraction.Types.TEMPMUTE or Infraction.Types.TEMPROLE:
member = guild.get_member(item.user_id)
if member:
if item.metadata['role'] in member.roles:
self.call(
'ModLogPlugin.create_debounce',
guild.id,
['GuildMemberUpdate'],
user_id=item.user_id,
role_id=item.metadata['role'],
)
member.remove_role(item.metadata['role'])
self.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_TEMPMUTE_EXPIRE,
guild.id,
member=member,
inf=item
)
else:
GuildMemberBackup.remove_role(
item.guild_id,
item.user_id,
item.metadata['role'])
else:
self.log.warning('[INF] failed to clear infraction %s, type is invalid %s', item.id, item.type_)
continue
# TODO: n+1
item.active = False
item.save()
# Wait a few seconds to backoff from a possible bad loop, and requeue new infractions
gevent.sleep(5)
self.queue_infractions()
def restore_user(self, event, member):
try:
backup = GuildMemberBackup.get(guild_id=event.guild_id, user_id=member.user.id)
except GuildMemberBackup.DoesNotExist:
return
kwargs = {}
if event.config.persist.nickname and backup.nick:
kwargs['nick'] = backup.nick
if event.config.persist.roles:
roles = set(event.guild.roles.keys())
if event.config.persist.role_ids:
roles &= set(event.config.persist.role_ids)
roles = set(backup.roles) & roles
if roles:
kwargs['roles'] = list(roles)
if event.config.persist.voice:
if backup.mute:
kwargs['mute'] = backup.mute
if backup.deaf:
kwargs['deaf'] = backup.deaf
if not kwargs:
return
self.call(
'ModLogPlugin.create_debounce',
event,
['GuildMemberUpdate'],
)
member.modify(**kwargs)
self.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_RESTORE,
event.guild.id,
member=member,
elements=', '.join(kwargs.keys())
)
@Plugin.listen('GuildMemberRemove', priority=Priority.BEFORE)
def on_guild_member_remove(self, event):
if event.user.id in event.guild.members:
GuildMemberBackup.create_from_member(event.guild.members.get(event.user.id))
@Plugin.listen('GuildMemberAdd')
def on_guild_member_add(self, event):
if not event.config.persist:
return
self.restore_user(event, event.member)
@Plugin.listen('GuildMemberUpdate', priority=Priority.BEFORE)
def on_guild_member_update(self, event):
pre_member = event.guild.members.get(event.id)
if not pre_member:
return
pre_roles = set(pre_member.roles)
post_roles = set(event.roles)
if pre_roles == post_roles:
return
removed = pre_roles - post_roles
# If the user was unmuted, mark any temp-mutes as inactive
if event.config.mute_role in removed:
Infraction.clear_active(event, event.user.id, [Infraction.Types.TEMPMUTE])
@Plugin.listen('GuildBanRemove')
def on_guild_ban_remove(self, event):
Infraction.clear_active(event, event.user.id, [Infraction.Types.BAN, Infraction.Types.TEMPBAN])
@Plugin.listen('GuildRoleUpdate', priority=Priority.BEFORE)
def on_guild_role_update(self, event):
if event.role.id not in event.config.locked_roles:
return
if event.role.id in self.unlocked_roles and self.unlocked_roles[event.role.id] > time.time():
return
if event.role.id in self.role_debounces:
if self.role_debounces.pop(event.role.id) > time.time():
return
role_before = event.guild.roles.get(event.role.id)
if not role_before:
return
to_update = {}
for field in ('name', 'hoist', 'color', 'permissions', 'position'):
if getattr(role_before, field) != getattr(event.role, field):
to_update[field] = getattr(role_before, field)
if to_update:
self.log.warning('Rolling back update to role %s (in %s), role is locked', event.role.id, event.guild_id)
self.role_debounces[event.role.id] = time.time() + 60
event.role.update(**to_update)
@Plugin.command('unban', '<user:snowflake> [reason:str...]', level=CommandLevels.MOD)
def unban(self, event, user, reason=None):
try:
GuildBan.get(user_id=user, guild_id=event.guild.id)
event.guild.delete_ban(user)
GuildBan.delete().where(
(GuildBan.user_id == event.author.id) &
(GuildBan.guild_id == event.guild.id)
)
except (GuildBan.DoesNotExist, APIException) as e:
if hasattr(e, 'code') and e.code != 10026: # Unknown Ban
raise APIException(e.response)
raise CommandFail('user with id `{}` is not banned'.format(user))
Infraction.create(
guild_id=event.guild.id,
user_id=user,
actor_id=event.author.id,
type_=Infraction.Types.UNBAN,
reason=reason
)
raise CommandSuccess('unbanned user with id `{}`'.format(user))
@Plugin.command('archive', group='infractions', level=CommandLevels.ADMIN)
def infractions_archive(self, event):
user = User.alias()
actor = User.alias()
q = Infraction.select(Infraction, user, actor).join(
user,
on=((Infraction.user_id == user.user_id).alias('user'))
).switch(Infraction).join(
actor,
on=((Infraction.actor_id == actor.user_id).alias('actor'))
).where(Infraction.guild_id == event.guild.id)
buff = StringIO()
w = csv.writer(buff)
for inf in q:
w.writerow([
inf.id,
inf.user_id,
str(inf.user),
inf.actor_id,
str(inf.actor),
str({i.index: i for i in Infraction.Types.attrs}[inf.type_]),
str(inf.reason),
])
event.msg.reply('Ok, here is an archive of all infractions', attachments=[
('infractions.csv', buff.getvalue())
])
def find_infraction(self, event, infraction):
if isinstance(infraction, int) or infraction.isdigit():
try:
return Infraction.get(id=infraction)
except Infraction.DoesNotExist:
return
infraction = infraction.lower()
q = (Infraction.guild_id == event.guild.id)
if infraction in ('ml', 'mylatest'):
q &= (Infraction.actor_id == event.author.id)
elif infraction not in ('l', 'latest'):
raise CommandFail('invalid argument: must be an infraction ID, `l/latest`, or `ml/mylatest`')
try:
return Infraction.select(Infraction).where(q).order_by(Infraction.created_at.desc()).limit(1).get()
except Infraction.DoesNotExist:
return
@Plugin.command('info', '<infraction:int>', group='infractions', level=CommandLevels.MOD)
def infraction_info(self, event, infraction):
try:
user = User.alias()
actor = User.alias()
infraction = Infraction.select(Infraction, user, actor).join(
user,
on=((Infraction.user_id == user.user_id).alias('user'))
).switch(Infraction).join(
actor,
on=((Infraction.actor_id == actor.user_id).alias('actor'))
).where(
(Infraction.id == infraction) &
(Infraction.guild_id == event.guild.id)
).get()
except Infraction.DoesNotExist:
raise CommandFail('cannot find an infraction with ID `{}`'.format(infraction))
type_ = {i.index: i for i in Infraction.Types.attrs}[infraction.type_]
embed = MessageEmbed()
if type_ in (Infraction.Types.MUTE, Infraction.Types.TEMPMUTE, Infraction.Types.TEMPROLE, Infraction.Types.WARNING):
embed.color = 0xfdfd96
elif type_ in (Infraction.Types.KICK, Infraction.Types.SOFTBAN):
embed.color = 0xffb347
else:
embed.color = 0xff6961
embed.title = str(type_).title()
embed.set_thumbnail(url=infraction.user.get_avatar_url())
embed.add_field(name='User', value=infraction.user, inline=True)
embed.add_field(name='Moderator', value=infraction.actor, inline=True)
embed.add_field(name='ID', value=infraction.id, inline=True)
embed.add_field(name='Active', value='yes' if infraction.active else 'no', inline=True)
if infraction.active and infraction.expires_at:
embed.add_field(name='Expires in', value=humanize_duration(infraction.expires_at - datetime.utcnow()))
embed.add_field(name='Reason', value=infraction.reason or '*Not specified*', inline=False)
embed.timestamp = infraction.created_at.isoformat()
event.msg.reply('', embed=embed)
@Plugin.command('search', '[query:user|str...]', group='infractions', level=CommandLevels.MOD)
def infraction_search(self, event, query=None):
q = (Infraction.guild_id == event.guild.id)
if query and isinstance(query, list) and isinstance(query[0], DiscoUser):
query = query[0].id
elif query:
query = ' '.join(map(str, query))
if query and (isinstance(query, int) or query.isdigit()):
q &= (
(Infraction.id == int(query)) |
(Infraction.user_id == int(query)) |
(Infraction.actor_id == int(query)))
elif query:
q &= (Infraction.reason ** query)
user = User.alias()
actor = User.alias()
infractions = Infraction.select(Infraction, user, actor).join(
user,
on=((Infraction.user_id == user.user_id).alias('user'))
).switch(Infraction).join(
actor,
on=((Infraction.actor_id == actor.user_id).alias('actor'))
).where(q).order_by(Infraction.created_at.desc()).limit(8)
if not infractions:
return event.msg.reply('No infractions found for the given query.')
tbl = MessageTable()
tbl.set_header('ID', 'Created', 'Type', 'User', 'Moderator', 'Active', 'Reason')
for inf in infractions:
type_ = {i.index: i for i in Infraction.Types.attrs}[inf.type_]
reason = inf.reason or ''
if len(reason) > 256:
reason = reason[:256] + '…'
if inf.active:
active = 'yes'
if inf.expires_at:
active += ' (expires in {})'.format(humanize.naturaldelta(inf.expires_at - datetime.utcnow()))
else:
active = 'no'
old_size = tbl.size_index.copy()
tbl.add(
inf.id,
inf.created_at.isoformat(),
str(type_),
str(inf.user),
str(inf.actor),
active,
clamp(reason, 128)
)
if len(tbl.compile()) > 2000:
tbl.entries.pop()
tbl.size_index = old_size.copy()
break
event.msg.reply(tbl.compile())
# Thanks OGNovuh / Terminator966
@Plugin.command('recent', aliases=['latest'], group='infractions', level=CommandLevels.MOD)
def infractions_recent(self, event):
user = User.alias()
actor = User.alias()
infraction = Infraction.select(Infraction, user, actor).join(
user,
on=((Infraction.user_id == user.user_id).alias('user'))
).switch(Infraction).join(
actor,
on=((Infraction.actor_id == actor.user_id).alias('actor'))
).where(
(Infraction.guild_id == event.guild.id)
).order_by(Infraction.created_at.desc()).limit(1).get()
type_ = {i.index: i for i in Infraction.Types.attrs}[infraction.type_]
embed = MessageEmbed()
if type_ in (Infraction.Types.MUTE, Infraction.Types.TEMPMUTE, Infraction.Types.TEMPROLE, Infraction.Types.WARNING):
embed.color = 0xfdfd96
elif type_ in (Infraction.Types.KICK, Infraction.Types.SOFTBAN):
embed.color = 0xffb347
else:
embed.color = 0xff6961
embed.title = str(type_).title()
embed.set_thumbnail(url=infraction.user.get_avatar_url())
embed.add_field(name='User', value=str(infraction.user), inline=True)
embed.add_field(name='Moderator', value=str(infraction.actor), inline=True)
embed.add_field(name='ID', value=infraction.id, inline=True)
embed.add_field(name='Active', value='yes' if infraction.active else 'no', inline=True)
if infraction.active and infraction.expires_at:
embed.add_field(name='Expires in', value=humanize_duration(infraction.expires_at - datetime.utcnow()))
embed.add_field(name='Reason', value=infraction.reason or '*Not specified*', inline=False)
embed.timestamp = infraction.created_at.isoformat()
event.msg.reply('', embed=embed)
# Thanks OGNovuh / Terminator966
@Plugin.command('delete', '<infraction:int>', group='infractions', aliases=['remove', 'del', 'rem', 'rm', 'rmv'], level=-1)
def infraction_delete(self, event, infraction):
try:
inf = Infraction.select(Infraction).where(
(Infraction.id == infraction)
).get()
except Infraction.DoesNotExist:
raise CommandFail('cannot find an infraction with ID `{}`'.format(infraction))
msg = event.msg.reply('Ok, delete infraction #{}?'.format(infraction))
msg.chain(False).\
add_reaction(GREEN_TICK_EMOJI).\
add_reaction(RED_TICK_EMOJI)
try:
mra_event = self.wait_for_event(
'MessageReactionAdd',
message_id=msg.id,
conditional=lambda e: (
e.emoji.id in (GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID) and
e.user_id == event.author.id
)).get(timeout=10)
except gevent.Timeout:
return
finally:
msg.delete()
if mra_event.emoji.id != GREEN_TICK_EMOJI_ID:
return
inf.delete_instance()
self.queue_infractions()
raise CommandSuccess('deleted infraction #{}.'.format(infraction))
@Plugin.command('duration', '<infraction:int|str> <duration:str>', group='infractions', level=CommandLevels.MOD)
def infraction_duration(self, event, infraction, duration):
inf = self.find_infraction(event, infraction)
if inf.actor_id != event.author.id and event.user_level < CommandLevels.ADMIN:
raise CommandFail('only administrators can modify the duration of infractions created by other moderators')
if not inf.active:
raise CommandFail('that infraction is not active and cannot be updated')
expires_dt = parse_duration(duration, inf.created_at)
converted = False
if inf.type_ in [Infraction.Types.MUTE.index, Infraction.Types.BAN.index]:
inf.type_ = (
Infraction.Types.TEMPMUTE
if inf.type_ == Infraction.Types.MUTE.index else
Infraction.Types.TEMPBAN
)
converted = True
elif inf.type_ not in [
Infraction.Types.TEMPMUTE.index,
Infraction.Types.TEMPBAN.index,
Infraction.Types.TEMPROLE.index]:
raise CommandFail('cannot set the duration for that type of infraction')
inf.expires_at = expires_dt
inf.save()
self.queue_infractions()
if converted:
raise CommandSuccess('ok, I\'ve made that infraction temporary, it will now expire on {}'.format(
inf.expires_at.isoformat()
))
else:
raise CommandSuccess('ok, I\'ve updated that infractions duration, it will now expire on {}'.format(
inf.expires_at.isoformat()
))
@Plugin.command('reason', '<infraction:int|str> <reason:str...>', level=CommandLevels.MOD)
@Plugin.command('reason', '<infraction:int|str> <reason:str...>', group='infractions', level=CommandLevels.MOD)
def reason(self, event, infraction, reason):
inf = self.find_infraction(event, infraction)
if not inf or inf.guild_id != event.guild.id:
event.msg.reply('Unknown infraction ID')
return
if not inf.actor_id:
inf.actor_id = event.author.id
if inf.actor_id != event.author.id and event.user_level < event.config.reason_edit_level:
raise CommandFail('you do not have the permissions required to edit other moderators infractions')
inf.reason = reason
inf.save()
raise CommandSuccess('I\'ve updated the reason for infraction #{}'.format(inf.id))
# Full credit goes to: Xenthys
@Plugin.command('roles', '[pattern:str...]', level=CommandLevels.MOD)
def roles(self, event, pattern=None):
buff = ''
g = event.guild
total = {}
members = g.members.values()
for member in members:
for role_id in member.roles:
total[role_id] = total.get(role_id, 0) + 1
roles = g.roles.values()
roles = sorted(roles, key=lambda r: r.position, reverse=True)
for role in roles:
if pattern and role.name.lower().find(pattern.lower()) == -1: continue
role_members = total.get(role.id, 0) if role.id != g.id else len(g.members)
role = S('{} - {} ({} member{})\n'.format(
role.id,
role.name,
role_members,
's' if role_members != 1 else ''
), escape_codeblocks=True)
if len(role) + len(buff) > 1990:
event.msg.reply('```dns\n{}```'.format(buff))
buff = ''
buff += role
if not buff:
return
return event.msg.reply('```dns\n{}```'.format(buff))
@Plugin.command('restore', '<user:user>', level=CommandLevels.MOD, group='backups')
def restore(self, event, user):
member = event.guild.get_member(user)
if member:
self.restore_user(event, member)
else:
raise CommandFail('invalid user')
@Plugin.command('clear', '<user_id:snowflake>', level=CommandLevels.MOD, group='backups')
def backups_clear(self, event, user_id):
deleted = bool(GuildMemberBackup.delete().where(
(GuildMemberBackup.user_id == user_id) &
(GuildMemberBackup.guild_id == event.guild.id)
).execute())
if deleted:
event.msg.reply(':ok_hand: I\'ve cleared the member backup for that user')
else:
raise CommandFail('I couldn\'t find any member backups for that user')
def can_act_on(self, event, victim_id, throw=True):
if event.author.id == victim_id:
if not throw:
return False
raise CommandFail('cannot execute that action on yourself')
victim_level = self.bot.plugins.get('CorePlugin').get_level(event.guild, victim_id)
if event.user_level <= victim_level:
if not throw:
return False
raise CommandFail('invalid permissions')
perms = event.guild.get_permissions(self.bot.client.state.me)
if not perms.ban_members and not perms.administrator:
if not throw:
return False
raise CommandFail('missing permissions')
if hasattr(event, 'action') and event.action in ('kick', 'ban'):
member_me = event.guild.get_member(self.bot.client.state.me)
member = event.guild.get_member(victim_id)
if member.roles:
highest_role_me = sorted(
[member_me.guild.roles.get(r) for r in member_me.roles],
key=lambda r: r.position,
reverse=True)
highest_role = sorted(
[event.guild.roles.get(r) for r in member.roles],
key=lambda i: i.position,
reverse=True)
if member.owner or (highest_role[0].position >= highest_role_me[0].position):
if not throw:
return False
raise CommandFail('cannot execute that action on that member')
return True
@Plugin.command('mute', '<user:user|snowflake> [reason:str...]', level=CommandLevels.MOD)
@Plugin.command('tempmute', '<user:user|snowflake> <duration:str> [reason:str...]', level=CommandLevels.MOD)
def tempmute(self, event, user, duration=None, reason=None):
if not duration and reason:
duration = parse_duration(reason.split(' ')[0], safe=True)
if duration:
if ' ' in reason:
reason = reason.split(' ', 1)[-1]
else:
reason = None
elif duration:
duration = parse_duration(duration)
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
if not event.config.mute_role:
raise CommandFail('mute is not setup on this server')
if event.config.mute_role in member.roles:
raise CommandFail('{} is already muted'.format(member.user))
# If we have a duration set, this is a tempmute
if duration:
# Create the infraction
try:
self.send_infraction_dm(
event,
member.id,
'tempmute',
event.msg.guild.name,
event.author,
reason,
duration
)
except APIException:
pass
Infraction.tempmute(self, event, member, reason, duration)
self.queue_infractions()
if event.config.confirm_actions:
event.msg.reply(maybe_string(
reason,
':ok_hand: {u} is now muted for {t} (`{o}`)',
':ok_hand: {u} is now muted for {t}',
u=member.user,
t=humanize_duration(duration - datetime.utcnow()),
))
else:
existed = False
                # If the user is already muted, check if we can upgrade a
                # temp-mute to a perma-mute.
                if event.config.mute_role in member.roles:
                    existed = Infraction.clear_active(event, member.id, [Infraction.Types.TEMPMUTE])
                    # The user is muted but not temp-muted at this point, so bail
                    if not existed:
                        raise CommandFail('{} is already muted'.format(member.user))
try:
self.send_infraction_dm(
event,
member.id,
'mute',
event.msg.guild.name,
event.author,
reason
)
except APIException:
pass
Infraction.mute(self, event, member, reason)
if event.config.confirm_actions:
                    existed = ' [was temp-muted]' if existed else ''
event.msg.reply(maybe_string(
reason,
':ok_hand: {u} is now muted (`{o}`)' + existed,
':ok_hand: {u} is now muted' + existed,
u=member.user,
))
else:
raise CommandFail('invalid user')
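    # Usage sketch (assuming the guild's command prefix is '!'):
    #   !mute @user being rude          -> indefinite mute
    #   !tempmute @user 2h being rude   -> mute that expires after two hours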
@Plugin.command(
'temprole',
'<user:user|snowflake> <role:snowflake|str> <duration:str> [reason:str...]',
level=CommandLevels.MOD)
def temprole(self, event, user, role, duration, reason=None):
member = event.guild.get_member(user)
if not member:
raise CommandFail('invalid user')
self.can_act_on(event, member.id)
role_id = role if isinstance(role, int) else event.config.role_aliases.get(role.lower())
if not role_id or role_id not in event.guild.roles:
raise CommandFail('invalid or unknown role')
if role_id in member.roles:
raise CommandFail('{} is already in that role'.format(member.user))
expire_dt = parse_duration(duration)
Infraction.temprole(self, event, member, role_id, reason, expire_dt)
self.queue_infractions()
if event.config.confirm_actions:
event.msg.reply(maybe_string(
reason,
':ok_hand: {u} is now in the {r} role for {t} (`{o}`)',
':ok_hand: {u} is now in the {r} role for {t}',
r=event.guild.roles[role_id].name,
u=member.user,
t=humanize_duration(expire_dt - datetime.utcnow()),
))
@Plugin.command('unmute', '<user:user|snowflake>', level=CommandLevels.MOD)
def unmute(self, event, user, reason=None):
# TOOD: eventually we should pull the role from the GuildMemberBackup if they arent in server
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
if not event.config.mute_role:
raise CommandFail('mute is not setup on this server')
if event.config.mute_role not in member.roles:
raise CommandFail('{} is not muted'.format(member.user))
Infraction.clear_active(event, member.id, [Infraction.Types.MUTE, Infraction.Types.TEMPMUTE])
self.call(
'ModLogPlugin.create_debounce',
event,
['GuildMemberUpdate'],
role_id=event.config.mute_role,
)
member.remove_role(event.config.mute_role)
self.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_UNMUTED,
event.guild.id,
member=member,
actor=event.author if event.author.id != member.id else 'Automatic',
)
if event.config.confirm_actions:
event.msg.reply(':ok_hand: {} is now unmuted'.format(member.user))
else:
raise CommandFail('invalid user')
@Plugin.command('kick', '<user:user|snowflake> [reason:str...]', level=CommandLevels.MOD)
def kick(self, event, user, reason=None):
event.action = 'kick'
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
try:
self.send_infraction_dm(
event,
member.id,
'kick',
event.msg.guild.name,
event.author,
reason
)
except APIException:
pass
Infraction.kick(self, event, member, reason)
if event.config.confirm_actions:
event.msg.reply(maybe_string(
reason,
':ok_hand: kicked {u} (`{o}`)',
':ok_hand: kicked {u}',
u=member.user,
))
else:
raise CommandFail('invalid user')
@Plugin.command('mkick', parser=True, level=CommandLevels.MOD)
@Plugin.parser.add_argument('users', type=int, nargs='+')
@Plugin.parser.add_argument('-r', '--reason', default='', help='reason for modlog')
#@Plugin.parser.add_argument('-c', '--clean', type=int, default=0, help='days of messages to clean')
def mkick(self, event, args):
event.action = 'kick'
members = []; cannot = []
users = list(set(args.users))
for user_id in users:
member = event.guild.get_member(user_id)
if member and self.can_act_on(event, member.id, throw=False):
members.append(member)
else:
cannot.append(user_id)
if not members:
raise CommandFail('invalid user{}'.format('s' if len(users)>1 else ''))
nope = ''
if cannot:
nope = '\n*Ignoring {} user{} who cannot be kicked.*'.format(len(cannot), 's' if len(cannot)>1 else '')
msg = event.msg.reply('Ok, kick {} user{} for `{}`?{}'.format(
len(members), 's' if len(members)>1 else '', args.reason or 'no reason', nope
))
msg.chain(False).add_reaction(GREEN_TICK_EMOJI).add_reaction(RED_TICK_EMOJI)
try:
mra_event = self.wait_for_event(
'MessageReactionAdd',
message_id=msg.id,
conditional=lambda e: (
e.emoji.id in (GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID) and
e.user_id == event.author.id
)).get(timeout=10)
except gevent.Timeout:
return
finally:
msg.delete()
if mra_event.emoji.id != GREEN_TICK_EMOJI_ID:
return
failed = []
succeeded = []
for member in members:
try:
self.send_infraction_dm(
event,
member.id if hasattr(member, 'id') else member,
'kick',
event.msg.guild.name,
event.author,
args.reason
)
except APIException:
pass
try:
Infraction.kick(self, event, member, args.reason)
succeeded.append(member)
except APIException:
if not isinstance(member, int):
User.from_disco_user(member.user)
member = member.user.id
failed.append(member)
members = len(members)
total = len(succeeded)
if total == 0:
raise CommandFail('could not kick any member')
#self.perform_cleanup(succeeded, args.clean, event.guild.id)
reply = 'kicked {}/{} member{}'.format(total, members, 's' if members>1 else '')
if failed:
reply += '\nThe following ID{} could not be kicked: `{}`'.format(
's' if len(failed)>1 else '',
'`, `'.join([str(id) for id in failed])
)
raise CommandSuccess(reply)
@Plugin.command('ban', '<user:user|snowflake> [reason:str...]', level=CommandLevels.MOD)
@Plugin.command('forceban', '<user:snowflake> [reason:str...]', level=CommandLevels.MOD)
@Plugin.command('cleanban', '<user:user|snowflake> <delete_message_days:int> [reason:str...]', aliases=['cban'], level=CommandLevels.MOD)
def ban(self, event, user, reason=None, delete_message_days=0, mode=None):
if isinstance(user, int):
user_id = user
self.can_act_on(event, user)
else:
user_id = user.id
member = event.guild.get_member(user)
if member:
user = member
event.action = 'ban'
self.can_act_on(event, user_id)
else:
user = user_id
try:
self.send_infraction_dm(
event,
user_id,
'ban',
event.msg.guild.name,
event.author,
reason,
)
except APIException:
pass
try:
Infraction.ban(self, event, user, reason, guild=event.guild, delete_message_days=delete_message_days)
except APIException:
raise CommandFail('invalid user')
if event.config.confirm_actions:
event.msg.reply(maybe_string(
reason,
':ok_hand: banned {u} (`{o}`)',
':ok_hand: banned {u}',
u=user.user if isinstance(user, GuildMember) else user,
))
@Plugin.command('mban', parser=True, level=CommandLevels.MOD)
@Plugin.parser.add_argument('users', type=int, nargs='+')
@Plugin.parser.add_argument('-r', '--reason', default='', help='reason for modlog')
@Plugin.parser.add_argument('-h', '--hide', action='store_true', help='hide names like forceban')
@Plugin.parser.add_argument('-c', '--clean', type=int, default=0, help='days of messages to clean')
def mban(self, event, args):
members = []; cannot = []
users = list(set(args.users))
for user_id in users:
if self.can_act_on(event, user_id, throw=False):
member = None if args.hide else event.guild.get_member(user_id)
members.append(member if member else user_id)
else:
cannot.append(user_id)
if not members:
raise CommandFail('invalid user{}'.format('s' if len(users)>1 else ''))
nope = ''
if cannot:
nope = '\n*Ignoring {} user{} who cannot be banned.*'.format(len(cannot), 's' if len(cannot)>1 else '')
msg = event.msg.reply('Ok, ban {} user{} for `{}`?{}'.format(
len(members), 's' if len(members)>1 else '', args.reason or 'no reason', nope
))
msg.chain(False).add_reaction(GREEN_TICK_EMOJI).add_reaction(RED_TICK_EMOJI)
try:
mra_event = self.wait_for_event(
'MessageReactionAdd',
message_id=msg.id,
conditional=lambda e: (
e.emoji.id in (GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID) and
e.user_id == event.author.id
)).get(timeout=10)
except gevent.Timeout:
return
finally:
msg.delete()
if mra_event.emoji.id != GREEN_TICK_EMOJI_ID:
return
failed = []
succeeded = []
for member in members:
try:
self.send_infraction_dm(
event,
member.id if hasattr(member, 'id') else member,
'ban',
event.msg.guild.name,
event.author,
args.reason
)
except APIException:
pass
try:
Infraction.ban(self, event, member, args.reason, guild=event.guild, delete_message_days=args.clean)
succeeded.append(member)
except APIException:
if not isinstance(member, int):
User.from_disco_user(member.user)
member = member.id
failed.append(member)
        attempted = len(members)
        total = len(succeeded)
        if total == 0:
            raise CommandFail('could not ban any user')
        #self.perform_cleanup(succeeded, args.clean, event.guild.id)
        reply = 'banned {}/{} user{}'.format(total, attempted, 's' if attempted > 1 else '')
if failed:
reply += '\nThe following ID{} could not be banned: `{}`'.format(
's' if len(failed) > 1 else '',
'`, `'.join([str(id) for id in failed])
)
raise CommandSuccess(reply)
@Plugin.command('softban', '<user:user|snowflake> [reason:str...]', level=CommandLevels.MOD)
def softban(self, event, user, reason=None):
"""
Ban then unban a user from the server (with an optional reason for the modlog)
"""
event.action = 'kick'
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
try:
self.send_infraction_dm(
event,
member.id,
'kick',
event.msg.guild.name,
event.author,
reason
)
except APIException:
pass
Infraction.softban(self, event, member, reason)
if event.config.confirm_actions:
event.msg.reply(maybe_string(
reason,
':ok_hand: soft-banned {u} (`{o}`)',
':ok_hand: soft-banned {u}',
u=member.user,
))
else:
raise CommandFail('invalid user')
@Plugin.command('tempban', '<user:user|snowflake> <duration:str> [reason:str...]', level=CommandLevels.MOD)
def tempban(self, event, duration, user, reason=None):
event.action = 'ban'
member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
expires_dt = parse_duration(duration)
try:
self.send_infraction_dm(
event,
member.id,
'tempban',
event.msg.guild.name,
event.author,
reason,
expires_dt
)
except APIException:
pass
Infraction.tempban(self, event, member, reason, expires_dt)
self.queue_infractions()
if event.config.confirm_actions:
event.msg.reply(maybe_string(
reason,
':ok_hand: temp-banned {u} for {t} (`{o}`)',
':ok_hand: temp-banned {u} for {t}',
u=member.user,
t=humanize_duration(expires_dt - datetime.utcnow()),
))
else:
raise CommandFail('invalid user')
@Plugin.command('warn', '<user:user|snowflake> [reason:str...]', level=CommandLevels.MOD)
def warn(self, event, user, reason=None):
        member = event.guild.get_member(user)
if member:
self.can_act_on(event, member.id)
try:
self.send_infraction_dm(
event,
member.id,
'warn',
event.msg.guild.name,
event.author,
reason
)
except APIException:
pass
Infraction.warn(self, event, member, reason, guild=event.guild)
else:
raise CommandFail('invalid user')
if event.config.confirm_actions:
event.msg.reply(maybe_string(
reason,
':ok_hand: warned {u} (`{o}`)',
':ok_hand: warned {u}',
u=member.user if member else user,
))
@Plugin.command('here', '[size:int]', level=CommandLevels.MOD, context={'mode': 'all'}, group='archive')
@Plugin.command('all', '[size:int]', level=CommandLevels.MOD, context={'mode': 'all'}, group='archive')
@Plugin.command(
'user',
'<user:user|snowflake> [size:int]',
level=CommandLevels.MOD,
context={'mode': 'user'},
group='archive')
@Plugin.command(
'channel',
'<channel:channel|snowflake> [size:int]',
level=CommandLevels.MOD,
context={'mode': 'channel'},
group='archive')
def archive(self, event, size=50, mode=None, user=None, channel=None):
if size < 1 or size > 15000:
            raise CommandFail('too many messages, must be between 1-15000')
q = Message.select(Message.id).join(User).order_by(Message.id.desc()).limit(size)
if mode in ('all', 'channel'):
cid = event.channel.id
if channel:
cid = channel if isinstance(channel, int) else channel.id
channel = event.guild.channels.get(cid)
if not channel:
raise CommandFail('channel not found')
perms = channel.get_permissions(event.author)
if not (perms.administrator or perms.view_channel):
raise CommandFail('invalid permissions')
q = q.where(Message.channel_id == cid)
else:
user_id = user if isinstance(user, int) else user.id
if event.author.id != user_id:
self.can_act_on(event, user_id)
q = q.where(
(Message.author_id == user_id) &
(Message.guild_id == event.guild.id)
)
archive = MessageArchive.create_from_message_ids([i.id for i in q])
event.msg.reply('OK, archived {} messages at {}'.format(len(archive.message_ids), archive.url))
@Plugin.command('extend', '<archive_id:str> <duration:str>', level=CommandLevels.MOD, group='archive')
def archive_extend(self, event, archive_id, duration):
try:
archive = MessageArchive.get(archive_id=archive_id)
except MessageArchive.DoesNotExist:
raise CommandFail('invalid message archive id')
archive.expires_at = parse_duration(duration)
MessageArchive.update(
expires_at=parse_duration(duration)
).where(
(MessageArchive.archive_id == archive_id)
).execute()
raise CommandSuccess('duration of archive {} has been extended (<{}>)'.format(
archive_id,
archive.url,
))
@Plugin.command('clean cancel', level=CommandLevels.MOD)
    def clean_cancel(self, event):
if event.channel.id not in self.cleans:
raise CommandFail('no clean is running in this channel')
self.cleans[event.channel.id].kill()
event.msg.reply('Ok, the running clean was cancelled')
@Plugin.command('clean all', '[size:int]', level=CommandLevels.MOD, context={'mode': 'all'})
@Plugin.command('clean bots', '[size:int]', level=CommandLevels.MOD, context={'mode': 'bots'})
@Plugin.command('clean user', '<user:user> [size:int]', level=CommandLevels.MOD, context={'mode': 'user'})
def clean(self, event, user=None, size=25, typ=None, mode='all'):
"""
Removes messages
"""
if size < 1 or size > 10000:
            raise CommandFail('too many messages, must be between 1-10000')
if event.channel.id in self.cleans:
raise CommandFail('a clean is already running on this channel')
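        # Discord's bulk-delete endpoint rejects messages older than 14 days,
        # so the query below only considers recent ones (13 days, presumably
        # to keep a safety margin).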
query = Message.select(Message.id).where(
(Message.deleted >> False) &
(Message.channel_id == event.channel.id) &
(Message.timestamp > (datetime.utcnow() - timedelta(days=13)))
).join(User).order_by(Message.timestamp.desc()).limit(size)
if mode == 'bots':
query = query.where((User.bot >> True))
elif mode == 'user':
query = query.where((User.user_id == user.id))
messages = [i[0] for i in query.tuples()]
if len(messages) == 1:
return self.client.api.channels_messages_delete(event.channel.id, messages[0])
if len(messages) > 100:
msg = event.msg.reply('Woah there, that will delete a total of {} messages, please confirm.'.format(
len(messages)
))
msg.chain(False).\
add_reaction(GREEN_TICK_EMOJI).\
add_reaction(RED_TICK_EMOJI)
try:
mra_event = self.wait_for_event(
'MessageReactionAdd',
message_id=msg.id,
conditional=lambda e: (
e.emoji.id in (GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID) and
e.user_id == event.author.id
)).get(timeout=10)
except gevent.Timeout:
return
finally:
msg.delete()
if mra_event.emoji.id != GREEN_TICK_EMOJI_ID:
return
event.msg.reply(':wastebasket: Ok please hold on while I delete those messages…').after(5).delete()
def run_clean():
for chunk in chunks(messages, 100):
self.client.api.channels_messages_delete_bulk(event.channel.id, chunk)
self.cleans[event.channel.id] = gevent.spawn(run_clean)
self.cleans[event.channel.id].join()
del self.cleans[event.channel.id]
@Plugin.command(
'add',
'<user:user> <role:str> [reason:str...]',
level=CommandLevels.MOD,
context={'mode': 'add'},
group='role')
@Plugin.command(
'rmv',
'<user:user> <role:str> [reason:str...]',
level=CommandLevels.MOD,
context={'mode': 'remove'},
group='role')
@Plugin.command('remove',
'<user:user> <role:str> [reason:str...]',
level=CommandLevels.MOD,
context={'mode': 'remove'},
group='role')
def role_add(self, event, user, role, reason=None, mode=None):
role_obj = None
if role.isdigit() and int(role) in event.guild.roles.keys():
role_obj = event.guild.roles[int(role)]
elif role.lower() in event.config.role_aliases:
role_obj = event.guild.roles.get(event.config.role_aliases[role.lower()])
else:
# First try exact match
exact_matches = [i for i in event.guild.roles.values() if i.name.lower().replace(' ', '') == role.lower()]
if len(exact_matches) == 1:
role_obj = exact_matches[0]
else:
# Otherwise we fuzz it up
rated = sorted([
(fuzz.partial_ratio(role, r.name.replace(' ', '')), r) for r in event.guild.roles.values()
], key=lambda i: i[0], reverse=True)
if rated[0][0] > 40:
if len(rated) == 1:
role_obj = rated[0][1]
elif rated[0][0] - rated[1][0] > 20:
role_obj = rated[0][1]
if not role_obj:
raise CommandFail('too many matches for that role, try something more exact or the role ID')
author_member = event.guild.get_member(event.author)
highest_role = sorted(
[event.guild.roles.get(r) for r in author_member.roles],
key=lambda i: i.position,
reverse=True)
if not author_member.owner and (not highest_role or highest_role[0].position <= role_obj.position):
raise CommandFail('you can only {} roles that are ranked lower than your highest role'.format(mode))
member = event.guild.get_member(user)
if not member:
raise CommandFail('invalid member')
self.can_act_on(event, member.id)
if mode == 'add' and role_obj.id in member.roles:
raise CommandFail('{} already has the {} role'.format(member, role_obj.name))
elif mode == 'remove' and role_obj.id not in member.roles:
raise CommandFail('{} doesn\'t have the {} role'.format(member, role_obj.name))
self.call(
'ModLogPlugin.create_debounce',
event,
['GuildMemberUpdate'],
role_id=role_obj.id,
)
if mode == 'add':
member.add_role(role_obj.id)
else:
member.remove_role(role_obj.id)
self.call(
'ModLogPlugin.log_action_ext',
(Actions.MEMBER_ROLE_ADD if mode == 'add' else Actions.MEMBER_ROLE_REMOVE),
event.guild.id,
member=member,
role=role_obj,
actor=event.author,
reason=reason or 'no reason',
)
event.msg.reply(':ok_hand: {} role {} {} {}'.format(
'added' if mode == 'add' else 'removed',
role_obj.name,
'to' if mode == 'add' else 'from',
member
))
@Plugin.command('stats', '<user:user>', level=CommandLevels.MOD)
def msgstats(self, event, user):
# Query for the basic aggregate message statistics
message_stats = Message.select(
fn.Count('*'),
fn.Sum(fn.char_length(Message.content)),
fn.Sum(fn.array_length(Message.emojis, 1)),
fn.Sum(fn.array_length(Message.mentions, 1)),
fn.Sum(fn.array_length(Message.attachments, 1)),
).where(
(Message.author_id == user.id)
).tuples()
reactions_given = Reaction.select(
fn.Count('*'),
Reaction.emoji_id,
Reaction.emoji_name,
).join(
Message,
on=(Message.id == Reaction.message_id)
).where(
(Reaction.user_id == user.id)
).group_by(
Reaction.emoji_id, Reaction.emoji_name
).order_by(fn.Count('*').desc()).tuples()
# Query for most used emoji
emojis = Message.raw('''
SELECT gm.emoji_id, gm.name, count(*)
FROM (
SELECT unnest(emojis) as id
FROM messages
WHERE author_id=%s
) q
JOIN guild_emojis gm ON gm.emoji_id=q.id
GROUP BY 1, 2
ORDER BY 3 DESC
LIMIT 1
''', (user.id, )).tuples()
deleted = Message.select(
fn.Count('*')
).where(
(Message.author_id == user.id) &
(Message.deleted == 1)
).tuples()
# If we hit an exception executing the core query, throw an exception
if message_stats.exception:
message_stats.get()
q = message_stats.value[0]
embed = MessageEmbed()
embed.fields.append(
MessageEmbedField(name='Total Messages Sent', value=q[0] or '0', inline=True))
embed.fields.append(
MessageEmbedField(name='Total Characters Sent', value=q[1] or '0', inline=True))
if deleted.value:
embed.fields.append(
MessageEmbedField(name='Total Deleted Messages', value=deleted.value[0][0], inline=True))
embed.fields.append(
MessageEmbedField(name='Total Custom Emojis', value=q[2] or '0', inline=True))
embed.fields.append(
MessageEmbedField(name='Total Mentions', value=q[3] or '0', inline=True))
embed.fields.append(
MessageEmbedField(name='Total Attachments', value=q[4] or '0', inline=True))
if reactions_given.value:
reactions_given = reactions_given.value
embed.fields.append(
MessageEmbedField(name='Total Reactions', value=sum(i[0] for i in reactions_given), inline=True))
emoji = (
reactions_given[0][2]
if not reactions_given[0][1] else
'<:{}:{}>'.format(reactions_given[0][2], reactions_given[0][1])
)
embed.fields.append(
MessageEmbedField(name='Most Used Reaction', value='{} (used {} times)'.format(
emoji,
reactions_given[0][0],
), inline=True))
if emojis.value:
emojis = list(emojis.value)
if emojis:
embed.add_field(
name='Most Used Emoji',
value='<:{1}:{0}> (`{1}`, used {2} times)'.format(*emojis[0]))
embed.thumbnail = MessageEmbedThumbnail(url=user.avatar_url)
embed.color = get_dominant_colors_user(user)
event.msg.reply('', embed=embed)
@Plugin.command('emojistats', '<mode:str> <sort:str>', level=CommandLevels.MOD)
def emojistats_custom(self, event, mode, sort):
if mode not in ('server', 'global'):
raise CommandFail('invalid emoji mode, must be `server` or `global`')
if sort not in ('least', 'most'):
raise CommandFail('invalid emoji sort, must be `least` or `most`')
order = 'DESC' if sort == 'most' else 'ASC'
if mode == 'server':
q = CUSTOM_EMOJI_STATS_SERVER_SQL.format(order, guild=event.guild.id)
else:
q = CUSTOM_EMOJI_STATS_GLOBAL_SQL.format(order, guild=event.guild.id)
q = list(GuildEmoji.raw(q).tuples())
tbl = MessageTable()
tbl.set_header('Count', 'Name', 'ID')
for emoji_id, name, count in q:
tbl.add(count, name, emoji_id)
event.msg.reply(tbl.compile())
@Plugin.command('prune', '[uses:int]', level=CommandLevels.ADMIN, group='invites')
def invites_prune(self, event, uses=1):
invites = [
i for i in event.guild.get_invites()
if i.uses <= uses and i.created_at < (datetime.utcnow() - timedelta(hours=1))
]
if not invites:
return event.msg.reply('I didn\'t find any invites matching your criteria')
msg = event.msg.reply(
'Ok, a total of {} invites created by {} users with {} total uses would be pruned.'.format(
len(invites),
len({i.inviter.id for i in invites}),
sum(i.uses for i in invites)
))
msg.chain(False).\
add_reaction(GREEN_TICK_EMOJI).\
add_reaction(RED_TICK_EMOJI)
try:
mra_event = self.wait_for_event(
'MessageReactionAdd',
message_id=msg.id,
conditional=lambda e: (
e.emoji.id in (GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID) and
e.user_id == event.author.id
)).get(timeout=10)
except gevent.Timeout:
msg.reply('Not executing invite prune')
msg.delete()
return
msg.delete()
if mra_event.emoji.id == GREEN_TICK_EMOJI_ID:
msg = msg.reply('Pruning invites…')
for invite in invites:
invite.delete()
msg.edit('Ok, invite prune completed')
else:
msg = msg.reply('Not pruning invites')
@Plugin.command(
'clean',
'<user:user|snowflake> [count:int] [emoji:str]',
level=CommandLevels.MOD,
group='reactions')
def reactions_clean(self, event, user, count=10, emoji=None):
if isinstance(user, DiscoUser):
user = user.id
if count > 50:
raise CommandFail('cannot clean more than 50 reactions')
lock = rdb.lock('clean-reactions-{}'.format(user))
if not lock.acquire(blocking=False):
            raise CommandFail('already running a clean on that user')
query = [
(Reaction.user_id == user),
(Message.guild_id == event.guild.id),
(Message.deleted == 0),
]
if emoji:
emoji_id = EMOJI_RE.findall(emoji)
if emoji_id:
query.append((Reaction.emoji_id == emoji_id[0]))
else:
# TODO: validation?
query.append((Reaction.emoji_name == emoji))
try:
reactions = list(Reaction.select(
Reaction.message_id,
Reaction.emoji_id,
Reaction.emoji_name,
Message.channel_id,
).join(
Message,
on=(Message.id == Reaction.message_id),
).where(
reduce(operator.and_, query)
).order_by(Reaction.message_id.desc()).limit(count).tuples())
if not reactions:
raise CommandFail('no reactions to purge')
msg = event.msg.reply('Hold on while I clean {} reactions'.format(
len(reactions)
))
for message_id, emoji_id, emoji_name, channel_id in reactions:
if emoji_id:
emoji = '{}:{}'.format(emoji_name, emoji_id)
else:
emoji = emoji_name
self.client.api.channels_messages_reactions_delete(
channel_id,
message_id,
emoji,
user)
msg.edit('Ok, I cleaned {} reactions'.format(
len(reactions),
))
finally:
lock.release()
@Plugin.command('log', '<user:user|snowflake>', group='voice', level=CommandLevels.MOD)
def voice_log(self, event, user):
if isinstance(user, DiscoUser):
user = user.id
sessions = GuildVoiceSession.select(
GuildVoiceSession.user_id,
GuildVoiceSession.channel_id,
GuildVoiceSession.started_at,
GuildVoiceSession.ended_at
).where(
(GuildVoiceSession.user_id == user) &
(GuildVoiceSession.guild_id == event.guild.id)
).order_by(GuildVoiceSession.started_at.desc()).limit(10)
tbl = MessageTable()
tbl.set_header('Channel', 'Joined At', 'Duration')
for session in sessions:
tbl.add(
(self.bot.client.state.channels.get(session.channel_id) or 'UNKNOWN'),
'{} ({} ago)'.format(
session.started_at.isoformat(),
humanize.naturaldelta(datetime.utcnow() - session.started_at)),
humanize.naturaldelta(session.ended_at - session.started_at) if session.ended_at else 'Active')
event.msg.reply(tbl.compile())
@Plugin.command('kick', '<user:user|snowflake>', group='voice', level=CommandLevels.MOD)
@Plugin.command('voicekick', '<user:user|snowflake>', aliases=['vkick'], level=CommandLevels.MOD)
def voice_kick(self, event, user):
member = event.guild.get_member(user)
if member:
if not member.get_voice_state():
raise CommandFail('member is not in a voice channel')
member.disconnect()
event.msg.reply(':ok_hand: kicked {} from voice channel'.format(member.user))
else:
raise CommandFail('invalid user')
@Plugin.command('join', '<name:str...>', aliases=['add', 'give'])
def join_role(self, event, name):
if not event.config.group_roles:
return
role = event.guild.roles.get(event.config.group_roles.get(name.lower()))
if not role:
raise CommandFail('invalid or unknown group')
has_any_admin_perms = any(role.permissions.can(i) for i in (
Permissions.KICK_MEMBERS,
Permissions.BAN_MEMBERS,
Permissions.ADMINISTRATOR,
Permissions.MANAGE_CHANNELS,
Permissions.MANAGE_GUILD,
Permissions.MANAGE_MESSAGES,
Permissions.MENTION_EVERYONE,
Permissions.MUTE_MEMBERS,
Permissions.MOVE_MEMBERS,
Permissions.MANAGE_NICKNAMES,
Permissions.MANAGE_ROLES,
Permissions.MANAGE_WEBHOOKS,
Permissions.MANAGE_EMOJIS,
))
# Sanity check
if has_any_admin_perms:
raise CommandFail('cannot join group with admin permissions')
member = event.guild.get_member(event.author)
if role.id in member.roles:
raise CommandFail('you are already a member of that group')
member.add_role(role)
if event.config.group_confirm_reactions:
event.msg.add_reaction(GREEN_TICK_EMOJI)
return
raise CommandSuccess('you have joined the {} group'.format(name))
@Plugin.command('leave', '<name:snowflake|str...>', aliases=['remove', 'take'])
def leave_role(self, event, name):
if not event.config.group_roles:
return
if name and isinstance(name, list) and isinstance(name[0], int):
name = name[0]
elif name:
            name = ' '.join(map(str, name)).lower()
role_id = event.config.group_roles.get(name)
if not role_id and name not in event.guild.roles:
raise CommandFail('invalid or unknown group')
member = event.guild.get_member(event.author)
if role_id not in member.roles:
raise CommandFail('you are not a member of that group')
member.remove_role(role_id)
if event.config.group_confirm_reactions:
event.msg.add_reaction(GREEN_TICK_EMOJI)
return
raise CommandSuccess('you have left the {} group'.format(name))
@Plugin.command('unlock', '<role_id:snowflake>', group='role', level=CommandLevels.ADMIN)
def unlock_role(self, event, role_id):
if role_id not in event.config.locked_roles:
raise CommandFail('role %s is not locked' % role_id)
if role_id in self.unlocked_roles and self.unlocked_roles[role_id] > time.time():
raise CommandFail('role %s is already unlocked' % role_id)
self.unlocked_roles[role_id] = time.time() + 300
raise CommandSuccess('role is unlocked for 5 minutes')
@Plugin.command('slowmode', '<interval:int> [channel:channel|snowflake]', level=CommandLevels.MOD)
def slowmode(self, event, interval=0, channel=None):
if interval < 0 or interval > 21600:
raise CommandFail('rate limit interval must be between 0-21600')
if isinstance(channel, DiscoChannel):
channel = channel.id
channel_id = channel or event.channel.id
self.bot.client.api.channels_modify(
channel_id,
rate_limit_per_user=interval,
reason='{} by {} ({})'.format('Enabled' if interval > 0 else 'Disabled', event.msg.author, event.msg.author.id)
)
if interval > 0:
raise CommandSuccess('slowmode enabled')
elif interval == 0:
raise CommandSuccess('slowmode disabled')
@Plugin.command('pong', level=CommandLevels.MOD)
def ping(self, event):
before = time.time()
message = event.msg.reply("Ping…")
ping = (time.time() - before) * 1000
message.edit("Pong! BOT: `{}ms` API: `{}ms`".format(int(ping), self.client.gw.latency))
| 37.810153 | 141 | 0.561239 |
4a200b90748ab89ef42e8e3370c5727282883ea7 | 9,609 | py | Python | onmt/modules/layer_norm.py | mullovc/NMTGMinor | b1b7b1e018eaa0d99a43449655937cc050a29987 | [
"MIT"
] | null | null | null | onmt/modules/layer_norm.py | mullovc/NMTGMinor | b1b7b1e018eaa0d99a43449655937cc050a29987 | [
"MIT"
] | null | null | null | onmt/modules/layer_norm.py | mullovc/NMTGMinor | b1b7b1e018eaa0d99a43449655937cc050a29987 | [
"MIT"
] | null | null | null | import math
import torch
import numbers
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn import functional as F
import importlib
try:
import apex.amp as amp
from apex.amp import half_function
except (ModuleNotFoundError, ImportError) as e:
amp = None
from .optimized.compat import half_function
try:
from torch.cuda.amp import custom_fwd, custom_bwd
except (ModuleNotFoundError, ImportError) as e:
from .optimized.compat import custom_fwd, custom_bwd
global fused_layer_norm_cuda
fused_layer_norm_cuda = None
"""
Faster version of Layer Norm from apex (new)
"""
try:
import apex
import fast_layer_norm
fast_fused = True
# print("[INFO] Fast layer norm implementation detected.")
except (ModuleNotFoundError, ImportError) as e:
fast_layer_norm = None
fast_fused = False
# print("[INFO] Fast layer norm implementation not found.")
class FastLayerNormFN(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, x, gamma, beta, epsilon):
x = x.contiguous()
gamma = gamma.contiguous()
beta = beta.contiguous()
hidden_size = gamma.numel()
xmat = x.view((-1, hidden_size))
ymat, mu, rsigma = fast_layer_norm.ln_fwd(xmat, gamma, beta, epsilon)
ctx.save_for_backward(x, gamma, mu, rsigma)
return ymat.view(x.shape)
@staticmethod
@custom_bwd
def backward(ctx, dy):
# assert dy.is_contiguous()
dy = dy.contiguous() # this happens!
x, gamma, mu, rsigma = ctx.saved_tensors
hidden_size = gamma.numel()
xmat = x.view((-1, hidden_size))
dymat = dy.view(xmat.shape)
dxmat, dgamma, dbeta = fast_layer_norm.ln_bwd(dymat, xmat, mu, rsigma, gamma)
dx = dxmat.view(x.shape)
return dx, dgamma, dbeta, None
"""
Fast version of Layer Norm from Apex
"""
class FusedLayerNormAffineFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, input, weight, bias, normalized_shape, eps):
global fused_layer_norm_cuda
if fused_layer_norm_cuda is None:
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
ctx.normalized_shape = normalized_shape
ctx.eps = eps
input_ = input.contiguous()
weight_ = weight.contiguous()
bias_ = bias.contiguous()
output, mean, invvar = fused_layer_norm_cuda.forward_affine(
input_, ctx.normalized_shape, weight_, bias_, ctx.eps)
ctx.save_for_backward(input_, weight_, bias_, mean, invvar)
return output
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
input_, weight_, bias_, mean, invvar = ctx.saved_tensors
grad_input = grad_weight = grad_bias = None
grad_input, grad_weight, grad_bias = fused_layer_norm_cuda.backward_affine(
grad_output.contiguous(), mean, invvar,
input_, ctx.normalized_shape,
weight_, bias_, ctx.eps)
return grad_input, grad_weight, grad_bias, None, None
class FusedLayerNormFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, input, normalized_shape, eps):
global fused_layer_norm_cuda
if fused_layer_norm_cuda is None:
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
ctx.normalized_shape = normalized_shape
ctx.eps = eps
input_ = input.contiguous()
output, mean, invvar = fused_layer_norm_cuda.forward(
input_, ctx.normalized_shape, ctx.eps)
ctx.save_for_backward(input_, mean, invvar)
return output
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
input_, mean, invvar = ctx.saved_tensors
grad_input = None
grad_input = fused_layer_norm_cuda.backward(
grad_output.contiguous(), mean, invvar,
input_, ctx.normalized_shape,
ctx.eps)
return grad_input, None, None
@half_function
def fast_layer_norm_affine(input, weight, bias, normalized_shape, eps=1e-6):
return FastLayerNormFN.apply(input, weight, bias, eps)
@half_function
def fused_layer_norm_affine(input, weight, bias, normalized_shape, eps=1e-6):
return FusedLayerNormAffineFunction.apply(input, weight, bias, normalized_shape, eps)
@half_function
def fused_layer_norm(input, normalized_shape, eps=1e-6):
return FusedLayerNormFunction.apply(input, normalized_shape, eps)
def tiny_value_of_dtype(dtype: torch.dtype):
"""
Returns a moderately tiny value for a given PyTorch data type that is used to avoid numerical
issues such as division by zero.
    This is different from `info_value_of_dtype(dtype).tiny` because using that value can cause NaN bugs.
Only supports floating point dtypes.
"""
if not dtype.is_floating_point:
raise TypeError("Only supports floating point dtypes.")
if dtype == torch.float or dtype == torch.double:
return 1e-13
elif dtype == torch.half:
return 1e-4
else:
raise TypeError("Does not support dtype " + str(dtype))
class LayerNorm(torch.nn.Module):
"""
    Applies layer normalization (same semantics as torch.nn.LayerNorm), using
    Apex's fused CUDA kernels for half-precision CUDA inputs when available and
    falling back to F.layer_norm otherwise.
"""
def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
super().__init__()
global fused_layer_norm_cuda
self.fused = True
try:
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
except ModuleNotFoundError:
self.fused = False
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = torch.Size(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.Tensor(*normalized_shape))
self.bias = Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
eps = self.eps
if not (input.is_cuda and input.type() == 'torch.cuda.HalfTensor' ) or not self.fused:
return F.layer_norm(
input, self.normalized_shape, self.weight, self.bias, eps)
if self.elementwise_affine:
# if fast_fused:
# return fast_layer_norm_affine(input, self.weight, self.bias, self.normalized_shape, eps)
return FusedLayerNormAffineFunction.apply(
input, self.weight, self.bias, self.normalized_shape, eps)
else:
return FusedLayerNormFunction.apply(input, self.normalized_shape, eps)
def extra_repr(self):
return '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
class MultilingualLayerNorm(torch.nn.Module):
"""
    Layer normalization with one set of affine parameters per language (see
    LayerNorm above); the `factor` argument of forward selects which language's
    weight and bias are applied.
"""
def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True, n_languages=1):
super().__init__()
self.n_languages = n_languages
global fused_layer_norm_cuda
self.fused = True
try:
fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
except ModuleNotFoundError:
self.fused = False
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = torch.Size(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.Tensor(self.n_languages, *self.normalized_shape))
self.bias = Parameter(torch.Tensor(self.n_languages, *self.normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input, factor):
# eps = tiny_value_of_dtype(input.dtype)
eps = self.eps
if self.elementwise_affine:
weight = torch.index_select(self.weight, 0, factor).squeeze(0)
bias = torch.index_select(self.bias, 0, factor).squeeze(0)
else:
weight, bias = None, None
if not input.is_cuda or not self.fused:
return F.layer_norm(
input, self.normalized_shape, weight, bias, eps)
if self.elementwise_affine:
if fast_fused and input.is_cuda:
return fast_layer_norm_affine(input, weight, bias, self.normalized_shape, eps)
return fused_layer_norm_affine(
input, weight, bias, self.normalized_shape, eps)
else:
return fused_layer_norm(input, self.normalized_shape, eps)
def extra_repr(self):
return '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
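# --- Editor's note: minimal usage sketch, not part of the original module ---
# On CPU, or whenever the fused Apex kernels are unavailable, LayerNorm falls
# back to F.layer_norm. The Apex fallbacks at the top use package-relative
# imports, so run this from the package context (e.g. python -m
# onmt.modules.layer_norm) or with Apex installed.
if __name__ == '__main__':
    hidden_size = 8
    layer_norm = LayerNorm(hidden_size)
    x = torch.randn(4, 5, hidden_size)
    y = layer_norm(x)
    # Normalized over the last dimension: per-position mean ~0, std ~1.
    print(y.shape, float(y.mean(dim=-1).abs().max()))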
| 33.954064 | 106 | 0.667811 |
4a200bd41d7cc425acc216a441f9d73f86c776b7 | 7,657 | py | Python | test/python/WMComponent_t/JobSubmitter_t/JobSubmitterCaching_t.py | phenomax/WMCore | 7c464735d1434558a7f525081fcc7fda7c2c5fc0 | [
"Apache-2.0"
] | null | null | null | test/python/WMComponent_t/JobSubmitter_t/JobSubmitterCaching_t.py | phenomax/WMCore | 7c464735d1434558a7f525081fcc7fda7c2c5fc0 | [
"Apache-2.0"
] | null | null | null | test/python/WMComponent_t/JobSubmitter_t/JobSubmitterCaching_t.py | phenomax/WMCore | 7c464735d1434558a7f525081fcc7fda7c2c5fc0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
_JobSubmitterCaching_t_
Verify that the caching of jobs and white/black lists works correctly.
"""
import os
import pickle
import unittest
from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller
from WMCore.JobStateMachine.ChangeState import ChangeState
from WMCore.ResourceControl.ResourceControl import ResourceControl
from WMCore.WMBS.File import File
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMBS.Job import Job
from WMCore.WMBS.JobGroup import JobGroup
from WMCore.WMBS.Subscription import Subscription
from WMCore.WMBS.Workflow import Workflow
from WMCore.WMBase import getTestBase
from WMCore.WorkQueue.WMBSHelper import killWorkflow
from WMQuality.Emulators import EmulatorSetup
from WMQuality.TestInitCouchApp import TestInitCouchApp as TestInit
from WMQuality.Emulators.EmulatedUnitTestCase import EmulatedUnitTestCase
class JobSubmitterCachingTest(EmulatedUnitTestCase):
def setUp(self):
"""
_setUp_
Set everything up.
"""
super(JobSubmitterCachingTest, self).setUp()
self.testInit = TestInit(__file__)
self.testInit.setLogging()
self.testInit.setDatabaseConnection()
self.testInit.setSchema(customModules=["WMCore.WMBS", "WMCore.BossAir",
"WMCore.ResourceControl"],
useDefault=False)
self.testInit.setupCouch("jobsubmittercaching_t/jobs", "JobDump")
self.testInit.setupCouch("jobsubmittercaching_t/fwjrs", "FWJRDump")
resourceControl = ResourceControl()
for siteName in ["T1_US_FNAL", "T1_UK_RAL"]:
resourceControl.insertSite(siteName=siteName, pnn="%s_Disk" % (siteName),
ceName=siteName, plugin="SimpleCondorPlugin", cmsName=siteName)
resourceControl.insertThreshold(siteName=siteName, taskType="Processing",
maxSlots=10000, pendingSlots=10000)
self.testDir = self.testInit.generateWorkDir()
self.configFile = EmulatorSetup.setupWMAgentConfig()
return
def tearDown(self):
"""
_tearDown_
Tear everything down.
"""
self.testInit.clearDatabase()
self.testInit.delWorkDir()
self.testInit.tearDownCouch()
EmulatorSetup.deleteConfig(self.configFile)
return
def createConfig(self):
"""
_createConfig_
Create a config for the JobSubmitter. These parameters are still pulled
from the environment.
"""
config = self.testInit.getConfiguration()
self.testInit.generateWorkDir(config)
config.component_("Agent")
config.Agent.isDocker = False
config.section_("JobStateMachine")
config.JobStateMachine.couchurl = os.getenv("COUCHURL")
config.JobStateMachine.couchDBName = "jobsubmittercaching_t"
config.section_("BossAir")
config.BossAir.pluginDir = "WMCore.BossAir.Plugins"
config.BossAir.pluginNames = ["SimpleCondorPlugin"]
config.BossAir.nCondorProcesses = 1
config.component_("JobSubmitter")
config.JobSubmitter.submitDir = self.testDir
config.JobSubmitter.submitScript = os.path.join(getTestBase(),
'WMComponent_t/JobSubmitter_t',
'submit.sh')
return config
def injectJobs(self):
"""
_injectJobs_
Inject two workflows into WMBS and save the job objects to disk.
"""
testWorkflowA = Workflow(spec="specA.pkl", owner="Steve",
name="wf001", task="TestTaskA")
testWorkflowA.create()
testWorkflowB = Workflow(spec="specB.pkl", owner="Steve",
name="wf002", task="TestTaskB")
testWorkflowB.create()
testFileset = Fileset("testFileset")
testFileset.create()
testSubA = Subscription(fileset=testFileset, workflow=testWorkflowA)
testSubA.create()
testSubB = Subscription(fileset=testFileset, workflow=testWorkflowB)
testSubB.create()
testGroupA = JobGroup(subscription=testSubA)
testGroupA.create()
testGroupB = JobGroup(subscription=testSubB)
testGroupB.create()
stateChanger = ChangeState(self.createConfig(), "jobsubmittercaching_t")
for i in range(10):
newFile = File(lfn="testFile%s" % i,
locations=set(["se.T1_US_FNAL", "se.T1_UK_RAL"]))
newFile.create()
newJobA = Job(name="testJobA-%s" % i, files=[newFile])
newJobA["workflow"] = "wf001"
newJobA["possiblePSN"] = ["T1_US_FNAL"]
newJobA["sandbox"] = "%s/somesandbox" % self.testDir
newJobA["owner"] = "Steve"
jobCacheDir = os.path.join(self.testDir, "jobA-%s" % i)
os.mkdir(jobCacheDir)
newJobA["cache_dir"] = jobCacheDir
newJobA["type"] = "Processing"
newJobA.create(testGroupA)
jobHandle = open(os.path.join(jobCacheDir, "job.pkl"), "wb")
pickle.dump(newJobA, jobHandle)
jobHandle.close()
stateChanger.propagate([newJobA], "created", "new")
newJobB = Job(name="testJobB-%s" % i, files=[newFile])
newJobB["workflow"] = "wf001"
newJobB["possiblePSN"] = ["T1_UK_RAL"]
newJobB["sandbox"] = "%s/somesandbox" % self.testDir
newJobB["owner"] = "Steve"
jobCacheDir = os.path.join(self.testDir, "jobB-%s" % i)
os.mkdir(jobCacheDir)
newJobB["cache_dir"] = jobCacheDir
newJobB["type"] = "Processing"
newJobB.create(testGroupB)
jobHandle = open(os.path.join(jobCacheDir, "job.pkl"), "wb")
pickle.dump(newJobB, jobHandle)
jobHandle.close()
stateChanger.propagate([newJobB], "created", "new")
return
def testCaching(self):
"""
_testCaching_
Verify that JobSubmitter caching works.
"""
config = self.createConfig()
mySubmitterPoller = JobSubmitterPoller(config)
mySubmitterPoller.getThresholds()
mySubmitterPoller.refreshCache()
self.assertEqual(len(mySubmitterPoller.jobDataCache), 0,
"Error: The job cache should be empty.")
self.injectJobs()
mySubmitterPoller.refreshCache()
# Verify the cache is full
self.assertEqual(len(mySubmitterPoller.jobDataCache), 20,
"Error: The job cache should contain 20 jobs. Contains: %i" % len(
mySubmitterPoller.jobDataCache))
killWorkflow("wf001", jobCouchConfig=config)
mySubmitterPoller.refreshCache()
# Verify that the workflow is gone from the cache
self.assertEqual(len(mySubmitterPoller.jobDataCache), 10,
"Error: The job cache should contain 10 jobs. Contains: %i" % len(
mySubmitterPoller.jobDataCache))
killWorkflow("wf002", jobCouchConfig=config)
mySubmitterPoller.refreshCache()
# Verify that the workflow is gone from the cache
self.assertEqual(len(mySubmitterPoller.jobDataCache), 0,
"Error: The job cache should be empty. Contains: %i" % len(mySubmitterPoller.jobDataCache))
return
if __name__ == "__main__":
unittest.main()
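# Editor's note: a single case can be selected from the command line, e.g.
#   python JobSubmitterCaching_t.py JobSubmitterCachingTest.testCaching
# (unittest.main() parses argv); CouchDB (COUCHURL) and the agent test
# database must be reachable for setUp() to succeed.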
| 36.117925 | 117 | 0.619956 |
4a200d1d3149a3b6490f9e716d01cc433de06f5a | 3,525 | py | Python | bindings/python/ensmallen/datasets/string/mucilaginibactersppamc26640.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/mucilaginibactersppamc26640.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/mucilaginibactersppamc26640.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Mucilaginibacter sp. PAMC 26640.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MucilaginibacterSpPamc26640(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Mucilaginibacter sp. PAMC 26640 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Mucilaginibacter sp. PAMC 26640 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="MucilaginibacterSpPamc26640",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
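# --- Editor's note: usage sketch, kept in a comment so importing this module
# never triggers a download (the first real call retrieves and caches the
# STRING data under `cache_path`). The import path assumes the package
# re-exports the retrieval function, as the ensmallen dataset modules
# conventionally do:
#
#     from ensmallen.datasets.string import MucilaginibacterSpPamc26640
#     graph = MucilaginibacterSpPamc26640(directed=False)
#     print(graph)  # summary of the retrieved protein-protein graph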
| 33.571429 | 223 | 0.68 |
4a200d28494e2fdb7d6aba04382a1367bbf79faa | 11,248 | py | Python | neutron/services/metering/drivers/iptables/iptables_driver.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | [
"Apache-2.0"
] | 10 | 2015-09-22T10:22:53.000Z | 2016-02-25T06:12:05.000Z | neutron/services/metering/drivers/iptables/iptables_driver.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | [
"Apache-2.0"
] | 12 | 2015-01-08T18:30:45.000Z | 2015-03-13T21:04:15.000Z | neutron/services/metering/drivers/iptables/iptables_driver.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | [
"Apache-2.0"
] | 7 | 2015-02-05T10:23:52.000Z | 2019-05-18T17:11:19.000Z | # Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from oslo.utils import importutils
import six
from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.agent.linux import iptables_manager
from neutron.common import constants as constants
from neutron.common import ipv6_utils
from neutron.common import log
from neutron.i18n import _LI
from neutron.openstack.common import log as logging
from neutron.services.metering.drivers import abstract_driver
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'
WRAP_NAME = 'neutron-meter'
EXTERNAL_DEV_PREFIX = 'qg-'
TOP_CHAIN = WRAP_NAME + "-FORWARD"
RULE = '-r-'
LABEL = '-l-'
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(interface.OPTS)
class IptablesManagerTransaction(object):
__transactions = {}
def __init__(self, im):
self.im = im
transaction = self.__transactions.get(im, 0)
transaction += 1
self.__transactions[im] = transaction
def __enter__(self):
return self.im
def __exit__(self, type, value, traceback):
transaction = self.__transactions.get(self.im)
if transaction == 1:
self.im.apply()
del self.__transactions[self.im]
else:
transaction -= 1
self.__transactions[self.im] = transaction
class RouterWithMetering(object):
def __init__(self, conf, router):
self.conf = conf
self.id = router['id']
self.router = router
self.root_helper = config.get_root_helper(self.conf)
self.ns_name = NS_PREFIX + self.id if conf.use_namespaces else None
self.iptables_manager = iptables_manager.IptablesManager(
root_helper=self.root_helper,
namespace=self.ns_name,
binary_name=WRAP_NAME,
use_ipv6=ipv6_utils.is_enabled())
self.metering_labels = {}
class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver):
def __init__(self, plugin, conf):
self.plugin = plugin
self.conf = conf or cfg.CONF
self.routers = {}
if not self.conf.interface_driver:
raise SystemExit(_('An interface driver must be specified'))
LOG.info(_LI("Loading interface driver %s"),
self.conf.interface_driver)
self.driver = importutils.import_object(self.conf.interface_driver,
self.conf)
def _update_router(self, router):
r = self.routers.get(router['id'],
RouterWithMetering(self.conf, router))
r.router = router
self.routers[r.id] = r
return r
@log.log
def update_routers(self, context, routers):
# disassociate removed routers
router_ids = set(router['id'] for router in routers)
for router_id, rm in six.iteritems(self.routers):
if router_id not in router_ids:
self._process_disassociate_metering_label(rm.router)
for router in routers:
old_gw_port_id = None
old_rm = self.routers.get(router['id'])
if old_rm:
old_gw_port_id = old_rm.router['gw_port_id']
gw_port_id = router['gw_port_id']
if gw_port_id != old_gw_port_id:
if old_rm:
with IptablesManagerTransaction(old_rm.iptables_manager):
self._process_disassociate_metering_label(router)
if gw_port_id:
self._process_associate_metering_label(router)
elif gw_port_id:
self._process_associate_metering_label(router)
@log.log
def remove_router(self, context, router_id):
if router_id in self.routers:
del self.routers[router_id]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def _process_metering_label_rules(self, rm, rules, label_chain,
rules_chain):
im = rm.iptables_manager
ext_dev = self.get_external_device_name(rm.router['gw_port_id'])
if not ext_dev:
return
for rule in rules:
remote_ip = rule['remote_ip_prefix']
if rule['direction'] == 'egress':
dir_opt = '-o %s -s %s' % (ext_dev, remote_ip)
else:
dir_opt = '-i %s -d %s' % (ext_dev, remote_ip)
if rule['excluded']:
ipt_rule = '%s -j RETURN' % dir_opt
im.ipv4['filter'].add_rule(rules_chain, ipt_rule,
wrap=False, top=True)
else:
ipt_rule = '%s -j %s' % (dir_opt, label_chain)
im.ipv4['filter'].add_rule(rules_chain, ipt_rule,
wrap=False, top=False)
def _process_associate_metering_label(self, router):
self._update_router(router)
rm = self.routers.get(router['id'])
with IptablesManagerTransaction(rm.iptables_manager):
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
label_chain = iptables_manager.get_chain_name(WRAP_NAME +
LABEL + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_chain(label_chain,
wrap=False)
rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
RULE + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_chain(rules_chain,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_rule(TOP_CHAIN, '-j ' +
rules_chain,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_rule(label_chain,
'',
wrap=False)
rules = label.get('rules')
if rules:
self._process_metering_label_rules(rm, rules,
label_chain,
rules_chain)
rm.metering_labels[label_id] = label
def _process_disassociate_metering_label(self, router):
rm = self.routers.get(router['id'])
if not rm:
return
with IptablesManagerTransaction(rm.iptables_manager):
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
if label_id not in rm.metering_labels:
continue
label_chain = iptables_manager.get_chain_name(WRAP_NAME +
LABEL + label_id,
wrap=False)
rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
RULE + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].remove_chain(label_chain,
wrap=False)
rm.iptables_manager.ipv4['filter'].remove_chain(rules_chain,
wrap=False)
del rm.metering_labels[label_id]
@log.log
def add_metering_label(self, context, routers):
for router in routers:
self._process_associate_metering_label(router)
@log.log
def update_metering_label_rules(self, context, routers):
for router in routers:
self._update_metering_label_rules(router)
def _update_metering_label_rules(self, router):
rm = self.routers.get(router['id'])
if not rm:
return
with IptablesManagerTransaction(rm.iptables_manager):
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
label_chain = iptables_manager.get_chain_name(WRAP_NAME +
LABEL + label_id,
wrap=False)
rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
RULE + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].empty_chain(rules_chain,
wrap=False)
rules = label.get('rules')
if rules:
self._process_metering_label_rules(rm, rules,
label_chain,
rules_chain)
@log.log
def remove_metering_label(self, context, routers):
for router in routers:
self._process_disassociate_metering_label(router)
@log.log
def get_traffic_counters(self, context, routers):
accs = {}
for router in routers:
rm = self.routers.get(router['id'])
if not rm:
continue
for label_id, label in rm.metering_labels.items():
chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL +
label_id, wrap=False)
chain_acc = rm.iptables_manager.get_traffic_counters(
chain, wrap=False, zero=True)
if not chain_acc:
continue
acc = accs.get(label_id, {'pkts': 0, 'bytes': 0})
acc['pkts'] += chain_acc['pkts']
acc['bytes'] += chain_acc['bytes']
accs[label_id] = acc
return accs
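# --- Editor's note: illustrative check, not part of the original driver ---
# IptablesManagerTransaction (defined above) reference-counts nested
# transactions per IptablesManager so that queued rule changes are applied
# exactly once, when the outermost 'with' block exits. A stub manager is
# enough to demonstrate that behaviour:
if __name__ == '__main__':
    class _StubManager(object):
        def __init__(self):
            self.applied = 0
        def apply(self):
            self.applied += 1
    _im = _StubManager()
    with IptablesManagerTransaction(_im):
        with IptablesManagerTransaction(_im):
            pass  # rule changes would be queued here
    assert _im.applied == 1  # applied exactly once, by the outermost exit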
| 38.920415 | 79 | 0.53494 |
4a200d785348e7c96857a2e1e4ce1ae4a441a70b | 490 | py | Python | apps/api/urls.py | abdurraufraihan/ecomcore | 689c20c30e43369b27b7efe02e5e809975de0a4f | [
"MIT"
] | 2 | 2021-11-12T15:51:55.000Z | 2021-12-28T05:34:55.000Z | apps/api/urls.py | abdurraufraihan/ecomcore | 689c20c30e43369b27b7efe02e5e809975de0a4f | [
"MIT"
] | 9 | 2021-03-19T09:09:55.000Z | 2022-03-12T00:38:39.000Z | apps/api/urls.py | abdurraufraihan/ecomcore | 689c20c30e43369b27b7efe02e5e809975de0a4f | [
"MIT"
] | 1 | 2021-12-28T05:34:56.000Z | 2021-12-28T05:34:56.000Z | from django.urls import path
from lib import apiendpoints
from apps.api.views.productviews import ProductListView, ProductDetailView
from apps.api.views.categoryviews import CategoryListView
urlpatterns = [
path(
apiendpoints.PRODUCT_URL,
ProductListView.as_view(),
name='productList'
),
path(
apiendpoints.PRODUCT_DETAIL_URL,
ProductDetailView.as_view(),
name='productDetail'
),
path(
apiendpoints.CATEGORY_URL,
CategoryListView.as_view(),
name='categoryList'
)
]
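# Editor's note (illustrative): with this module wired into the project's
# ROOT_URLCONF, the routes resolve by name, e.g. in tests or templates:
#
#     from django.urls import reverse
#     reverse('productList')  # -> the path bound to apiendpoints.PRODUCT_URL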
| 21.304348 | 74 | 0.781633 |
4a200d91c9340f59ffed41b3bdaf9890aae75755 | 2,038 | py | Python | code/apps/Managed Software Center/Managed Software Center/CocoaWrapper.py | dderusha/munki | ad3dd1673fc6544770e561b52000371113cd5294 | [
"Apache-2.0"
] | 1 | 2019-01-13T22:36:59.000Z | 2019-01-13T22:36:59.000Z | code/apps/Managed Software Center/Managed Software Center/CocoaWrapper.py | MarcelRaschke/munki | 5ab55e81934cf081d369ab11df70d2ee215df33e | [
"Apache-2.0"
] | null | null | null | code/apps/Managed Software Center/Managed Software Center/CocoaWrapper.py | MarcelRaschke/munki | 5ab55e81934cf081d369ab11df70d2ee215df33e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# CocoaWrapper.py
# MunkiStatus
#
# Created by Greg Neagle on 6/26/17.
# Copyright (c) 2018 The Munki Project. All rights reserved.
#
"""Selectively import Cocoa symbols to speed up app launch.
Idea from Per Olofsson's AutoDMG"""
# PyLint cannot properly find names inside Cocoa libraries, so issues bogus
# No name 'Foo' in module 'Bar' warnings. Disable them.
# pylint: disable=no-name-in-module
#
# disable unused-import warning, since we don't use any of these here.
# pylint: disable=unused-import
# put all Foundation imports used by the project here
from Foundation import (
NSAppleEventManager,
NSBundle,
NSCachesDirectory,
NSData,
NSDate,
NSDateFormatter,
NSDateFormatterBehavior10_4,
NSFileHandle,
NSFileManager,
NSInsetRect,
NSLocalizedString,
NSLog,
NSMakePoint,
NSMakeRect,
NSMakeSize,
NSMinX,
NSMinY,
NSMutableArray,
NSObject,
NSOffsetRect,
NSPoint,
NSPredicate,
NSString,
NSTimer,
NSURL,
NSURLFileScheme,
NSURLRequest,
NSURLRequestReloadIgnoringLocalCacheData,
NSUTF8StringEncoding,
NSUserDomainMask,
NSUserName,
NSZeroRect,
kCFDateFormatterLongStyle,
kCFDateFormatterShortStyle,
)
# put all AppKit imports used by the project here
from AppKit import (
NSAlert,
NSAlertAlternateReturn,
NSAlertDefaultReturn,
NSAlertFirstButtonReturn,
NSAlertOtherReturn,
NSAlertSecondButtonReturn,
NSApp,
NSApplication,
NSBezierPath,
NSButton,
NSButtonCell,
NSColor,
NSCompositeCopy,
NSCriticalAlertStyle,
NSDistributedNotificationCenter,
NSDragOperationAll,
NSFontAttributeName,
NSFontManager,
NSGraphicsContext,
NSImage,
NSNotFound,
NSNotificationDeliverImmediately,
NSNotificationPostToAllSessions,
NSNotificationSuspensionBehaviorDeliverImmediately,
NSOnState,
NSPasteboard,
NSScreen,
NSUserNotificationCenter,
NSWindowController,
NSWorkspace,
)
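# Editor's note (illustrative): other modules in the app import Cocoa symbols
# from this wrapper instead of pulling in the full frameworks, e.g.:
#
#     from CocoaWrapper import NSLog
#     NSLog(u"Managed Software Center launched")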
| 22.395604 | 75 | 0.720805 |
4a200dc11039f316e0431c7a22b363c82cbb0b55 | 4,196 | py | Python | isi_sdk_9_0_0/isi_sdk_9_0_0/models/job_statistics_job_node_io.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_9_0_0/isi_sdk_9_0_0/models/job_statistics_job_node_io.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_9_0_0/isi_sdk_9_0_0/models/job_statistics_job_node_io.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_9_0_0.models.job_statistics_job_node_io_read import JobStatisticsJobNodeIoRead # noqa: F401,E501
from isi_sdk_9_0_0.models.job_statistics_job_node_io_write import JobStatisticsJobNodeIoWrite # noqa: F401,E501
class JobStatisticsJobNodeIo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'read': 'JobStatisticsJobNodeIoRead',
'write': 'JobStatisticsJobNodeIoWrite'
}
attribute_map = {
'read': 'read',
'write': 'write'
}
def __init__(self, read=None, write=None): # noqa: E501
"""JobStatisticsJobNodeIo - a model defined in Swagger""" # noqa: E501
self._read = None
self._write = None
self.discriminator = None
self.read = read
self.write = write
@property
def read(self):
"""Gets the read of this JobStatisticsJobNodeIo. # noqa: E501
# noqa: E501
:return: The read of this JobStatisticsJobNodeIo. # noqa: E501
:rtype: JobStatisticsJobNodeIoRead
"""
return self._read
@read.setter
def read(self, read):
"""Sets the read of this JobStatisticsJobNodeIo.
# noqa: E501
:param read: The read of this JobStatisticsJobNodeIo. # noqa: E501
:type: JobStatisticsJobNodeIoRead
"""
if read is None:
raise ValueError("Invalid value for `read`, must not be `None`") # noqa: E501
self._read = read
@property
def write(self):
"""Gets the write of this JobStatisticsJobNodeIo. # noqa: E501
# noqa: E501
:return: The write of this JobStatisticsJobNodeIo. # noqa: E501
:rtype: JobStatisticsJobNodeIoWrite
"""
return self._write
@write.setter
def write(self, write):
"""Sets the write of this JobStatisticsJobNodeIo.
# noqa: E501
:param write: The write of this JobStatisticsJobNodeIo. # noqa: E501
:type: JobStatisticsJobNodeIoWrite
"""
if write is None:
raise ValueError("Invalid value for `write`, must not be `None`") # noqa: E501
self._write = write
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JobStatisticsJobNodeIo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
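# Editor's note (illustrative): like every swagger-generated model in this
# SDK, instances round-trip through plain dicts; with `some_read` and
# `some_write` standing in for instances of the generated sub-models:
#
#     io = JobStatisticsJobNodeIo(read=some_read, write=some_write)
#     io.to_dict()  # -> {'read': {...}, 'write': {...}}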
| 28.351351 | 112 | 0.585558 |
4a200e3f584f09ca7446db45059a206903d5fa39 | 1,094 | py | Python | tensorflow/contrib/keras/api/keras/preprocessing/__init__.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/contrib/keras/api/keras/preprocessing/__init__.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/contrib/keras/api/keras/preprocessing/__init__.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.preprocessing import image
from tensorflow.contrib.keras.api.keras.preprocessing import sequence
from tensorflow.contrib.keras.api.keras.preprocessing import text
del absolute_import
del division
del print_function
| 39.071429 | 80 | 0.74223 |
4a200f6b112697fc12dd7c6263413bb939e9cf5c | 4,205 | py | Python | benchmark/startQiskit_Class2751.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_Class2751.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_Class2751.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=38
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
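# Quick sanity check of the helpers above (illustrative addition, not in the
# original benchmark): bitwise_xor XORs bit by bit and reverses the result,
# while bitwise_dot is the inner product a.x mod 2 used to define the oracle f.
assert bitwise_xor("101", "011") == "011"  # per-bit XOR ['1','1','0'], reversed
assert bitwise_dot("101", "011") == "1"    # 1*0 + 0*1 + 1*1 = 1 (mod 2)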
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
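# Worked example (illustrative comment): for n=2 and f("11")="1" (f=0 elsewhere),
# only rep="11" enters the f(rep)=="1" branch, and since it contains no "0" bits
# no X gates are added, so the oracle is a single multi-controlled Toffoli that
# flips the target exactly on |11>. For rep="10" the X on the second control
# would temporarily map |10> onto |11> so the same Toffoli fires, then undo it.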
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.cx(input_qubit[0],input_qubit[3]) # number=35
prog.x(input_qubit[3]) # number=36
prog.cx(input_qubit[0],input_qubit[3]) # number=37
prog.cx(input_qubit[0],input_qubit[3]) # number=30
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.x(input_qubit[2]) # number=34
prog.y(input_qubit[1]) # number=19
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.y(input_qubit[3]) # number=20
prog.y(input_qubit[1]) # number=12
prog.rx(-2.158274153016188,input_qubit[3]) # number=24
prog.h(input_qubit[0]) # number=16
prog.cz(input_qubit[2],input_qubit[0]) # number=17
prog.h(input_qubit[0]) # number=18
prog.cx(input_qubit[1],input_qubit[0]) # number=21
prog.z(input_qubit[1]) # number=22
prog.cx(input_qubit[1],input_qubit[0]) # number=23
prog.h(input_qubit[0]) # number=25
prog.cz(input_qubit[2],input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=27
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
    sample_shot = 8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2751.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.467213 | 140 | 0.647087 |
4a20103ca0aeef4873ed96eb468af463fd7d2a19 | 4,647 | py | Python | Configuration/DataProcessing/python/Utils.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | Configuration/DataProcessing/python/Utils.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | Configuration/DataProcessing/python/Utils.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | #!/usr/bin/env python
"""
_Utils_
Module containing some utility tools
"""
def stepALCAPRODUCER(skims):
"""
_stepALCAPRODUCER_
Creates and returns the configuration string for the ALCAPRODUCER step
starting from the list of AlcaReco path to be run.
"""
step = ''
if len(skims) >0:
step = ',ALCAPRODUCER:'+('+'.join(skims))
return step
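# Illustrative behavior (not in the original module): with two AlcaReco skims,
#   stepALCAPRODUCER(['TkAlMinBias', 'HcalCalIsoTrk'])
# returns ',ALCAPRODUCER:TkAlMinBias+HcalCalIsoTrk', while an empty list yields
# '' so the step is simply omitted; stepSKIMPRODUCER below follows the same
# pattern with the 'SKIM:' prefix.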
def stepSKIMPRODUCER(PhysicsSkims):
"""
_stepSKIMPRODUCER_
Creates and returns the configuration string for the SKIM step
starting from the list of skims to be run.
"""
step = ''
if len(PhysicsSkims) >0 :
step = ',SKIM:'+('+'.join(PhysicsSkims))
return step
def addMonitoring(process):
"""
_addMonitoring_
Add the monitoring services to the process provided
in order to write out performance summaries to the framework job report
"""
import FWCore.ParameterSet.Config as cms
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
jobReportOutputOnly = cms.untracked.bool(True)
)
process.Timing = cms.Service("Timing",
summaryOnly = cms.untracked.bool(True)
)
return process
def validateProcess(process):
"""
_validateProcess_
Check attributes of process are appropriate for production
This method returns nothing but will throw a RuntimeError for any issues it finds
likely to cause problems in the production system
"""
schedule=process.schedule_()
paths=process.paths_()
endpaths=process.endpaths_()
# check output mods are in paths and have appropriate settings
for outputModName in process.outputModules_().keys():
outputMod = getattr(process, outputModName)
if not hasattr(outputMod, 'dataset'):
msg = "Process contains output module without dataset PSET: %s \n" % outputModName
msg += " You need to add this PSET to this module to set dataTier and filterName\n"
raise RuntimeError(msg)
ds=getattr(outputMod,'dataset')
if not hasattr(ds, "dataTier"):
msg = "Process contains output module without dataTier parameter: %s \n" % outputModName
msg += " You need to add an untracked parameter to the dataset PSET of this module to set dataTier\n"
raise RuntimeError(msg)
# check module in path or whatever (not sure of exact syntax for endpath)
omRun=False
        if schedule is None:
for path in paths:
if outputModName in getattr(process,path).moduleNames():
omRun=True
for path in endpaths:
if outputModName in getattr(process,path).moduleNames():
omRun=True
else:
for path in schedule:
if outputModName in path.moduleNames():
omRun=True
if omRun==False:
msg = "Output Module %s not in endPath" % outputModName
raise RuntimeError(msg)
def dqmIOSource(args):
import FWCore.ParameterSet.Config as cms
if args.get('newDQMIO', False):
return cms.Source("DQMRootSource",
fileNames = cms.untracked(cms.vstring())
)
else:
return cms.Source("PoolSource",
fileNames = cms.untracked(cms.vstring())
)
def harvestingMode(process, datasetName, args,rANDl=True):
import FWCore.ParameterSet.Config as cms
if rANDl and (not args.get('newDQMIO', False)):
process.source.processingMode = cms.untracked.string('RunsAndLumis')
process.dqmSaver.workflow = datasetName
process.dqmSaver.saveByLumiSection = 1
def dictIO(options,args):
if 'outputs' in args:
options.outputDefinition = args['outputs'].__str__()
else:
writeTiers = args.get('writeTiers', [])
options.eventcontent = ','.join(writeTiers)
options.datatier = ','.join(writeTiers)
def dqmSeq(args,default):
if 'dqmSeq' in args and len(args['dqmSeq'])!=0:
return ':'+('+'.join(args['dqmSeq']))
else:
return default
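# Illustrative behavior: dqmSeq({'dqmSeq': ['DQMOffline', 'L1TMonitor']}, '')
# returns ':DQMOffline+L1TMonitor'; with no (or an empty) 'dqmSeq' key the
# caller-supplied default string is returned unchanged.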
def gtNameAndConnect(globalTag, args):
if 'globalTagConnect' in args and args['globalTagConnect'] != '':
return globalTag + ','+args['globalTagConnect']
# we override here the default in the release which uses the FrontierProd servlet not suited for Tier0 activity
return globalTag +',frontier://PromptProd/CMS_CONDITIONS'
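# Illustrative behavior (tag name chosen for the example only):
#   gtNameAndConnect('106X_dataRun2_v1', {})
#     -> '106X_dataRun2_v1,frontier://PromptProd/CMS_CONDITIONS'
#   gtNameAndConnect('106X_dataRun2_v1', {'globalTagConnect': 'sqlite:my.db'})
#     -> '106X_dataRun2_v1,sqlite:my.db'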
| 33.192857 | 115 | 0.616312 |
4a20107cb606d45d157489b7249da46faafa2f7a | 3,270 | py | Python | test/functional/test_framework/ucacoin_node.py | ucacoin/Ucacoin2 | bc39105adbf648114f55f9f90976af2d2b7cd087 | [
"MIT"
] | 4 | 2020-07-31T12:27:23.000Z | 2021-06-05T23:07:37.000Z | test/functional/test_framework/ucacoin_node.py | ucacoin/Ucacoin2 | bc39105adbf648114f55f9f90976af2d2b7cd087 | [
"MIT"
] | 3 | 2020-08-02T10:47:08.000Z | 2021-07-07T06:41:54.000Z | test/functional/test_framework/ucacoin_node.py | ucacoin/Ucacoin2 | bc39105adbf648114f55f9f90976af2d2b7cd087 | [
"MIT"
] | 3 | 2020-08-24T15:36:47.000Z | 2020-10-13T15:51:47.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from .messages import msg_getheaders, msg_headers, CBlockHeader
from .mininode import P2PInterface, mininode_lock
from .util import wait_until
## UCACoin Test Node
class UCACoinTestNode(P2PInterface):
def __init__(self):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
def on_sendcmpct(self, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
wait_until(received_hash, timeout=timeout, lock=mininode_lock)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
wait_until(lambda: not self.connected, timeout=timeout, lock=mininode_lock)
| 39.39759 | 105 | 0.693884 |
4a20133bd20931a5b5c6d193d609ce9e45ee1366 | 3,582 | py | Python | tests/test_dense_model.py | r-o-s-h-a-n/semisupervisedFL | 4c568b4a9cead5aa57f403c1e1bc10e2eaac07e3 | [
"MIT"
] | 5 | 2020-02-25T00:24:11.000Z | 2021-03-19T12:28:14.000Z | tests/test_dense_model.py | r-o-s-h-a-n/semisupervisedFL | 4c568b4a9cead5aa57f403c1e1bc10e2eaac07e3 | [
"MIT"
] | 9 | 2020-02-11T02:33:56.000Z | 2021-11-10T19:54:17.000Z | tests/test_dense_model.py | r-o-s-h-a-n/semisupervisedFL | 4c568b4a9cead5aa57f403c1e1bc10e2eaac07e3 | [
"MIT"
] | 2 | 2020-02-13T15:12:02.000Z | 2020-05-28T18:23:17.000Z | import os
import shutil
import numpy as np
import functools
import unittest
import warnings
import pickle
import csv
import tensorflow as tf
import tensorflow_federated as tff
import dataloader as dta
import models as mdl
warnings.simplefilter('ignore')
class TestDenseAutoencoderModel(unittest.TestCase):
def setUp(self):
ph = {'optimizer': 'SGD',
'learning_rate': 10.0,
'dataset': 'emnist'}
keras_model_fn = mdl.DenseAutoencoderModel(ph)
preprocess_fn = getattr(keras_model_fn, 'preprocess_emnist')
dataloader = dta.DataLoader(
preprocess_fn,
num_epochs=1,
shuffle_buffer=1,
batch_size=20,
learning_env='federated'
)
train_client_data, _ = dta.get_client_data('emnist',
'example',
{'supervised':0.0,
'unsupervised':0.0},
sample_client_data = True)
sample_batch = dataloader.get_sample_batch(train_client_data)
model_fn = functools.partial(keras_model_fn.create_tff_model_fn, sample_batch)
iterative_process = tff.learning.build_federated_averaging_process(model_fn)
state = iterative_process.initialize()
sample_clients = train_client_data.client_ids[:5]
federated_train_data = dataloader.make_federated_data(train_client_data, sample_clients)
state, _ = iterative_process.next(state, federated_train_data)
self.old_model = keras_model_fn()
tff.learning.assign_weights_to_keras_model(self.old_model, state.model)
self.tmp_dir = 'tests/tmp/'
if not os.path.isdir(self.tmp_dir):
os.mkdir(self.tmp_dir)
self.model_fp = os.path.join(self.tmp_dir, 'model.h5')
keras_model_fn.save_model_weights(self.model_fp, state, sample_batch)
self.new_model = keras_model_fn.load_model_weights(self.model_fp)
ph = {'optimizer': 'SGD',
'learning_rate': 10.0,
'dataset': 'emnist',
'pretrained_model_fp': self.model_fp}
self.transfer_model = mdl.DenseSupervisedModel(ph)()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_save_model(self):
self.assertTrue(os.path.isfile(self.model_fp))
def test_load_model(self):
# all layers should be same
for i in range(len(self.old_model.get_weights())):
try:
np.testing.assert_almost_equal(self.old_model.get_weights()[i],
self.new_model.get_weights()[i])
except AssertionError:
self.fail('Initial weights are not same as loaded weights')
def test_transfer_model(self):
# encoder weights should be same
try:
np.testing.assert_allclose(self.old_model.get_weights()[0], self.transfer_model.get_weights()[0])
except AssertionError:
self.fail('Saved encoder model weights are not all close')
# decoder weights should be different
self.assertRaises(AssertionError, np.testing.assert_allclose, self.old_model.get_weights()[-1],
self.transfer_model.get_weights()[-1])
if __name__ == '__main__':
unittest.main() | 36.181818 | 109 | 0.592686 |
4a201406709034e28be9affe189767c934b67020 | 6,008 | py | Python | stubs.min/Autodesk/Revit/DB/__init___parts/UV.py | denfromufa/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2017-07-07T11:15:45.000Z | 2017-07-07T11:15:45.000Z | stubs.min/Autodesk/Revit/DB/__init___parts/UV.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/Autodesk/Revit/DB/__init___parts/UV.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class UV(object):
"""
Object representing coordinates in 2-dimensional space.
UV(u: float,v: float)
UV()
"""
def Add(self,source):
"""
Add(self: UV,source: UV) -> UV
Adds the specified 2-D vector to this 2-D vector and returns the result.
source: The vector to add to this vector.
Returns: The 2-D vector equal to the sum of the two vectors.
"""
pass
def AngleTo(self,source):
"""
AngleTo(self: UV,source: UV) -> float
Returns the angle between this vector and the specified vector.
source: The specified vector.
Returns: The real number between 0 and 2*PI equal to the angle between the two vectors
in radians.
"""
pass
def CrossProduct(self,source):
"""
CrossProduct(self: UV,source: UV) -> float
The cross product of this 2-D vector and the specified 2-D vector.
source: The vector to multiply with this vector.
Returns: The real number equal to the cross product.
"""
pass
def DistanceTo(self,source):
"""
DistanceTo(self: UV,source: UV) -> float
Returns the distance from this 2-D point to the specified 2-D point.
source: The specified point.
Returns: The real number equal to the distance between the two points.
"""
pass
def Divide(self,value):
"""
Divide(self: UV,value: float) -> UV
Divides this 2-D vector by the specified value and returns the result.
value: The value to divide this vector by.
Returns: The divided 2-D vector.
"""
pass
def DotProduct(self,source):
"""
DotProduct(self: UV,source: UV) -> float
The dot product of this 2-D vector and the specified 2-D vector.
source: The vector to multiply with this vector.
Returns: The real number equal to the dot product.
"""
pass
def GetLength(self):
"""
GetLength(self: UV) -> float
The length of this 2-D vector.
"""
pass
def IsAlmostEqualTo(self,source,tolerance=None):
"""
IsAlmostEqualTo(self: UV,source: UV) -> bool
Determines whether this 2-D vector and the specified 2-D vector are the same
within the tolerance (1.0e-09).
source: The vector to compare with this vector.
Returns: True if the vectors are the same; otherwise,false.
IsAlmostEqualTo(self: UV,source: UV,tolerance: float) -> bool
Determines whether this 2-D vector and the specified 2-D vector are the same
within a specified tolerance.
source: The vector to compare with this vector.
tolerance: The tolerance for equality check.
Returns: True if the vectors are the same; otherwise,false.
"""
pass
def IsUnitLength(self):
"""
IsUnitLength(self: UV) -> bool
The boolean value indicates whether this 2-D vector is of unit length.
"""
pass
def IsZeroLength(self):
"""
IsZeroLength(self: UV) -> bool
The boolean value indicates whether this 2-D vector is a zero vector.
"""
pass
def Multiply(self,value):
"""
Multiply(self: UV,value: float) -> UV
Multiplies this 2-D vector by the specified value and returns the result.
value: The value to multiply with this vector.
Returns: The multiplied 2-D vector.
"""
pass
def Negate(self):
"""
Negate(self: UV) -> UV
Negates this 2-D vector.
Returns: The 2-D vector opposite to this vector.
"""
pass
def Normalize(self):
"""
Normalize(self: UV) -> UV
Returns a new UV whose coordinates are the normalized values from this vector.
Returns: The normalized UV or zero if the vector is almost Zero.
"""
pass
def Subtract(self,source):
"""
Subtract(self: UV,source: UV) -> UV
Subtracts the specified 2-D vector from this 2-D vector and returns the result.
source: The vector to subtract from this vector.
Returns: The 2-D vector equal to the difference between the two vectors.
"""
pass
def ToString(self):
"""
ToString(self: UV) -> str
Gets formatted string showing (U,V) with values formatted to 9 decimal places.
"""
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __div__(self,*args):
""" x.__div__(y) <==> x/y """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __mul__(self,*args):
""" x.__mul__(y) <==> x*y """
pass
def __neg__(self,*args):
""" x.__neg__() <==> -x """
pass
@staticmethod
def __new__(self,u=None,v=None):
"""
__new__(cls: type,u: float,v: float)
__new__(cls: type)
"""
pass
def __radd__(self,*args):
"""
__radd__(left: UV,right: UV) -> UV
Adds the two specified 2-D vectors and returns the result.
left: The first vector.
right: The second vector.
Returns: The 2-D vector equal to the sum of the two source vectors.
"""
pass
def __rmul__(self,*args):
"""
__rmul__(value: float,right: UV) -> UV
The product of the specified number and the specified 2-D vector.
value: The value to multiply with the specified vector.
right: The vector to multiply with the value.
Returns: The multiplied 2-D vector.
"""
pass
def __rsub__(self,*args):
"""
__rsub__(left: UV,right: UV) -> UV
Subtracts the two specified 2-D vectors and returns the result.
left: The first vector.
right: The second vector.
Returns: The 2-D vector equal to the difference between the two source vectors.
"""
pass
def __sub__(self,*args):
""" x.__sub__(y) <==> x-y """
pass
U=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the first coordinate.
Get: U(self: UV) -> float
"""
V=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the second coordinate.
Get: V(self: UV) -> float
"""
BasisU=None
BasisV=None
Zero=None
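# --- Hedged usage sketch (comment only: these are IDE stubs, the real UV type
# only exists inside a running Revit session) ---
# u = UV(1.0, 0.0)
# v = UV(0.0, 1.0)
# u.DotProduct(v)      # 0.0 for perpendicular vectors
# u.CrossProduct(v)    # 1.0: the 2-D cross product is the scalar u.U*v.V - u.V*v.U
# u.AngleTo(v)         # pi/2 radians
# (u + v).Normalize()  # unit vector along (1,1) via __add__ and Normalize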
| 26.008658 | 92 | 0.627497 |
4a2014aca544c5cb9053ea9e1081e4de1955297b | 5,968 | py | Python | test/functional/rpc_users.py | Game-Frag/game-frag-coin | 87577eb3427c897525d6e833e403588c147a4d87 | [
"MIT"
] | null | null | null | test/functional/rpc_users.py | Game-Frag/game-frag-coin | 87577eb3427c897525d6e833e403588c147a4d87 | [
"MIT"
] | null | null | null | test/functional/rpc_users.py | Game-Frag/game-frag-coin | 87577eb3427c897525d6e833e403588c147a4d87 | [
"MIT"
] | 1 | 2021-06-05T01:09:23.000Z | 2021-06-05T01:09:23.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import GamefragTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (GamefragTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
#Append rpcauth to gamefrag.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser�"
rpcpassword = "rpcpassword=rpcpassword�"
with open(os.path.join(self.options.tmpdir+"/node0", "gamefrag.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
with open(os.path.join(self.options.tmpdir+"/node1", "gamefrag.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
rpcuserauthpair = "rpcuser�:rpcpassword�"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rpcuser
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
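# --- Hedged sketch (illustrative addition, not part of the upstream test):
# how the rpcauth entries written into gamefrag.conf above are derived. This
# mirrors the share/rpcauth helper of Bitcoin-derived codebases: the config
# stores user:salt$HMAC_SHA256(key=salt, msg=password), so the plaintext
# password never lands in the file.
# import hmac, hashlib
# def make_rpcauth(user, password, salt):
#     digest = hmac.new(salt.encode('utf8'), password.encode('utf8'),
#                       hashlib.sha256).hexdigest()
#     return 'rpcauth=%s:%s$%s' % (user, salt, digest)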
| 38.753247 | 129 | 0.614444 |
4a20158135ccd710c61ab02431f2ca513a9e965e | 1,176 | py | Python | examples/ge2e/random_cycle.py | yt605155624/Parakeet | 8ce8254adad55df07288df86cecdbf0f608b73fb | [
"Apache-2.0"
] | 501 | 2020-02-28T12:46:59.000Z | 2022-03-29T19:49:52.000Z | examples/ge2e/random_cycle.py | yt605155624/Parakeet | 8ce8254adad55df07288df86cecdbf0f608b73fb | [
"Apache-2.0"
] | 75 | 2020-03-24T04:40:41.000Z | 2021-11-19T02:18:30.000Z | examples/ge2e/random_cycle.py | yt605155624/Parakeet | 8ce8254adad55df07288df86cecdbf0f608b73fb | [
"Apache-2.0"
] | 79 | 2020-03-11T01:50:26.000Z | 2022-03-20T09:37:07.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
def cycle(iterable):
# cycle('ABCD') --> A B C D A B C D A B C D ...
saved = []
for element in iterable:
yield element
saved.append(element)
while saved:
for element in saved:
yield element
def random_cycle(iterable):
# cycle('ABCD') --> A B C D B C D A A D B C ...
saved = []
for element in iterable:
yield element
saved.append(element)
random.shuffle(saved)
while saved:
for element in saved:
yield element
random.shuffle(saved)
| 29.4 | 74 | 0.665816 |
4a2015b690b03f4e22a3949fcb021e015ecbe6b1 | 284 | py | Python | src/anicloud/util/button_in_text.py | Nanashisa/anicloud-api | 5a8f6cf8e991274d3f4185a3b836caf1501cdb81 | [
"MIT"
] | null | null | null | src/anicloud/util/button_in_text.py | Nanashisa/anicloud-api | 5a8f6cf8e991274d3f4185a3b836caf1501cdb81 | [
"MIT"
] | null | null | null | src/anicloud/util/button_in_text.py | Nanashisa/anicloud-api | 5a8f6cf8e991274d3f4185a3b836caf1501cdb81 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
def parse_url_button_in_text(s: str):
html = BeautifulSoup(s, features="html.parser")
data = []
for a in html.select("a"):
data.append((a.attrs["href"], a.text.strip()))
a.replace_with("")
return html.text.strip(), data
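# Illustrative round trip (hypothetical markup, not from the original module):
if __name__ == "__main__":
    text, buttons = parse_url_button_in_text(
        'Watch on <a href="https://example.com/ep1">Episode 1</a> now')
    print(text)     # 'Watch on  now' -- anchors are removed from the text
    print(buttons)  # [('https://example.com/ep1', 'Episode 1')]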
| 25.818182 | 54 | 0.633803 |