from kitty.data.report import Report
from kitty.fuzzers import ServerFuzzer
from kitty.model import Container, KittyException
from apifuzzer.utils import get_logger, transform_data_to_bytes
def _flatten_dict_entry(orig_key, v):
"""
This function is called recursively to list the params in template
:param orig_key: original key
:param v: list of params
:rtype: list
"""
entries = []
if isinstance(v, list):
count = 0
for elem in v:
entries.extend(_flatten_dict_entry("%s[%s]" % (orig_key, count), elem))
count += 1
elif isinstance(v, dict):
for k in v:
entries.extend(_flatten_dict_entry("%s/%s" % (orig_key, k), v[k]))
else:
entries.append((orig_key, v))
return entries
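# Illustrative example of the helper above (not part of the original source):
#   >>> _flatten_dict_entry("user", {"name": "a", "tags": ["x", "y"]})
#   [('user/name', 'a'), ('user/tags[0]', 'x'), ('user/tags[1]', 'y')]
# Nested dicts become "/"-joined keys and list items get an "[index]" suffix.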
class OpenApiServerFuzzer(ServerFuzzer):
"""Extends the ServerFuzzer with exit after the end message."""
def not_implemented(self, func_name):
_ = func_name
pass
def __init__(self):
self.logger = get_logger(self.__class__.__name__)
self.logger.info("Logger initialized")
super(OpenApiServerFuzzer, self).__init__(logger=self.logger)
def _transmit(self, node):
"""
Where the magic happens. This function prepares the request
:param node: Kitty template
:type node: object
"""
payload = {"content_type": self.model.content_type}
for key in ["url", "method"]:
payload[key] = transform_data_to_bytes(node.get_field_by_name(key).render())
fuzz_places = ["params", "headers", "data", "path_variables"]
for place in fuzz_places:
try:
if place in node._fields_dict:
param = node.get_field_by_name(place)
_result = self._recurse_params(param)
payload[place] = _result
except KittyException as e:
self.logger.warning(f"Exception occurred while processing {place}: {e}")
self._last_payload = payload
try:
return self.target.transmit(**payload)
except Exception as e:
self.logger.error(f"Error in transmit: {e}")
raise e
@staticmethod
def _recurse_params(param):
"""
        Iterates through parameters recursively
:param param: param to process
:type param: object
:rtype: dict
"""
_return = dict()
if isinstance(param, Container):
for field in param._fields:
_return[field.get_name()] = OpenApiServerFuzzer._recurse_params(field)
elif hasattr(param, "render"):
_return = transform_data_to_bytes(param.render()).decode(errors="ignore")
return _return
def _store_report(self, report):
"""
Enrich fuzz report
:param report: report to extend
"""
self.logger.debug("<in>")
report.add("test_number", self.model.current_index())
report.add("fuzz_path", self.model.get_sequence_str())
test_info = self.model.get_test_info()
data_model_report = Report(name="Data Model")
for k, v in test_info.items():
new_entries = _flatten_dict_entry(k, v)
for (k_, v_) in new_entries:
data_model_report.add(k_, v_)
report.add(data_model_report.get_name(), data_model_report)
payload = self._last_payload
if payload is not None:
data_report = Report("payload")
data_report.add("raw", payload)
data_report.add("length", len(payload))
report.add("payload", data_report)
else:
report.add("payload", None)
self.dataman.store_report(report, self.model.current_index())
# TODO investigate:
# self.dataman.get_report_by_id(self.model.current_index())
def _test_environment(self):
"""
Checks the test environment - not used
"""
sequence = self.model.get_sequence()
try:
if self._run_sequence(sequence):
self.logger.info("Environment test failed")
except Exception:
self.logger.info("Environment test failed") | APIFuzzer | /APIFuzzer-0.9.13-py3-none-any.whl/apifuzzer/server_fuzzer.py | server_fuzzer.py |
import json
from urllib.parse import urlparse
from json_ref_dict import materialize, RefDict
from apifuzzer.base_template import BaseTemplate
from apifuzzer.fuzz_utils import get_sample_data_by_type, get_fuzz_type_by_param_type
from apifuzzer.move_json_parts import JsonSectionAbove
from apifuzzer.template_generator_base import TemplateGenerator
from apifuzzer.utils import transform_data_to_bytes, pretty_print, get_logger
class ParamTypes(object):
PATH = "path"
QUERY = "query"
HEADER = "header"
COOKIE = "cookie"
BODY = "body"
FORM_DATA = "formData"
class OpenAPITemplateGenerator(TemplateGenerator):
"""
This class processes the Swagger, OpenAPI v2 and OpenAPI v3 definitions. Generates Fuzz template from the params
discovered.
"""
def __init__(self, api_definition_url, api_definition_file):
"""
:param api_definition_file: API resources local file
:type api_definition_file: str
:param api_definition_url: URL where the request should be sent
:type api_definition_url: str
"""
super().__init__()
self.templates = set()
self.logger = get_logger(self.__class__.__name__)
self.api_definition_url = api_definition_url
self.api_definition_file = api_definition_file
tmp_api_resources = self.resolve_json_references()
self.json_formatter = JsonSectionAbove(tmp_api_resources)
self.api_resources = self.json_formatter.resolve()
def resolve_json_references(self):
if self.api_definition_url:
reference = self.api_definition_url
else:
reference = self.api_definition_file
ref = RefDict(reference)
return materialize(ref)
@staticmethod
def _normalize_url(url_in):
"""
        Kitty does not support certain characters in template names, so they are replaced here;
        the substitution is reversed later when the request URL is assembled
:param url_in: url to process
:type url_in: str
:return: processed url
:rtype: str
"""
return url_in.strip("/").replace("/", "+")
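    # Illustrative example of _normalize_url above (hypothetical path):
    #   >>> OpenAPITemplateGenerator._normalize_url("/user/{userId}/")
    #   'user+{userId}'
    # The '+' characters are translated back to '/' when the request URL is
    # assembled by the fuzzer target.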
def _get_template(self, template_name):
"""
        Starts a new template if it does not exist yet, or returns the existing one with the required name
:param template_name: name of the template
:type template_name: str
:return: instance of BaseTemplate
"""
_return = None
for template in self.templates:
self.logger.debug(f"Checking {template.name} vs {template_name}")
if template.name == template_name:
self.logger.debug(f"Loading existing template: {template.name}")
_return = template
if not _return:
self.logger.debug(f"Open new Fuzz template for {template_name}")
_return = BaseTemplate(name=template_name)
return _return
def _save_template(self, template):
if template in self.templates:
self.logger.debug(f"Removing previous version of {template.name}")
self.templates.remove(template)
self.templates.add(template)
self.logger.debug(
f"Adding template to list: {template.name}, templates list: {len(self.templates)}"
)
@staticmethod
def _split_content_type(content_type):
"""
application/x-www-form-urlencoded -> x-www-form-urlencoded
multipart/form-data -> form-data
application/json -> json
        :param content_type: full content type, e.g. "application/json"
        :return: subtype part of the content type
"""
if "/" in content_type:
return content_type.split("/", 1)[1]
else:
return content_type
def process_api_resources(
self, paths=None, existing_template=None
): # pylint: disable=W0221
self.logger.info("Start preparation")
self._process_request_body()
self._process_api_resources()
def _process_request_body(self):
paths = self.api_resources["paths"]
request_body_paths = dict()
for resource in paths.keys():
normalized_url = self._normalize_url(resource)
if not request_body_paths.get(resource):
request_body_paths[resource] = dict()
for method in paths[resource].keys():
if not request_body_paths[resource].get(method):
request_body_paths[resource][method] = dict()
for content_type in (
paths[resource][method].get("requestBody", {}).get("content", [])
):
                    # as multiple content types can exist here, we need to open a new template for each
template_name = f"{normalized_url}|{method}-{self._split_content_type(content_type)}"
self.logger.info(
f"Resource: {resource} Method: {method}, CT: {content_type}"
)
template = self._get_template(template_name)
template.url = normalized_url
template.method = method.upper()
template.content_type = content_type
if not request_body_paths[resource][method].get("parameters"):
request_body_paths[resource][method]["parameters"] = []
for k, v in paths[resource][method]["requestBody"]["content"][
content_type
].items():
request_body_paths[resource][method]["parameters"].append(
{"in": "body", k: v}
)
self._process_api_resources(
paths=request_body_paths, existing_template=template
)
def _process_api_resources(self, paths=None, existing_template=None):
if paths is None:
paths = self.api_resources.get("paths")
for resource in paths.keys():
normalized_url = self._normalize_url(resource)
for method in paths[resource].keys():
self.logger.info("Resource: {} Method: {}".format(resource, method))
if existing_template:
template = existing_template
template_name = existing_template.name
else:
template_name = "{}|{}".format(normalized_url, method)
template = self._get_template(template_name)
template.url = normalized_url
template.method = method.upper()
                # Swagger v2: set content type from "consumes" (POST, PUT methods)
if len(paths[resource][method].get("consumes", [])):
template.content_type = paths[resource][method]["consumes"][0]
for param in list(paths[resource][method].get("parameters", {})):
if not isinstance(param, dict):
self.logger.warning(
"{} type mismatch, dict expected, got: {}".format(
param, type(param)
)
)
param = json.loads(param)
if param.get("type"):
parameter_data_type = param.get("type")
else:
parameter_data_type = "string"
param_format = param.get("format")
if param.get("example"):
sample_data = param.get("example")
elif param.get("default"):
sample_data = param.get("default")
else:
sample_data = get_sample_data_by_type(param.get("type"))
parameter_place_in_request = param.get("in")
parameters = list()
if param.get("name"):
param_name = f'{template_name}|{param.get("name")}'
parameters.append(
{"name": param_name, "type": parameter_data_type}
)
for _param in param.get("properties", []):
param_name = f"{template_name}|{_param}"
parameter_data_type = (
param.get("properties", {})
.get(_param)
.get("type", "string")
)
self.logger.debug(
f"Adding property: {param_name} with type: {parameter_data_type}"
)
_additional_param = {
"name": param_name,
"type": parameter_data_type,
"default": param.get("properties", {})
.get(_param)
.get("default"),
"example": param.get("properties", {})
.get(_param)
.get("example"),
"enum": param.get("properties", {}).get(_param).get("enum"),
}
parameters.append(_additional_param)
for _parameter in parameters:
param_name = _parameter.get("name")
parameter_data_type = _parameter.get("type")
if _parameter.get("enum"):
fuzzer_type = "enum"
elif param_format is not None:
fuzzer_type = param_format.lower()
elif parameter_data_type is not None:
fuzzer_type = parameter_data_type.lower()
else:
fuzzer_type = None
fuzz_type = get_fuzz_type_by_param_type(fuzzer_type)
if _parameter.get("enum") and hasattr(
fuzz_type, "accept_list_as_value"
):
sample_data = _parameter.get("enum")
elif _parameter.get("example"):
sample_data = _parameter.get("example")
elif _parameter.get("default"):
sample_data = _parameter.get("default")
self.logger.info(
f"Resource: {resource} Method: {method} \n Parameter: {param} \n"
f" Parameter place: {parameter_place_in_request} \n Sample data: {sample_data}"
f"\n Param name: {param_name}\n fuzzer_type: {fuzzer_type} "
f"fuzzer: {fuzz_type.__name__}"
)
if parameter_place_in_request == ParamTypes.PATH:
template.path_variables.add(
fuzz_type(name=param_name, value=str(sample_data))
)
elif parameter_place_in_request == ParamTypes.HEADER:
template.headers.add(
fuzz_type(
name=param_name,
value=transform_data_to_bytes(sample_data),
)
)
elif parameter_place_in_request == ParamTypes.COOKIE:
template.cookies.add(
fuzz_type(name=param_name, value=sample_data)
)
elif parameter_place_in_request == ParamTypes.QUERY:
template.params.add(
fuzz_type(name=param_name, value=str(sample_data))
)
elif parameter_place_in_request == ParamTypes.BODY:
if hasattr(fuzz_type, "accept_list_as_value"):
template.data.add(
fuzz_type(name=param_name, value=sample_data)
)
else:
template.data.add(
fuzz_type(
name=param_name,
value=transform_data_to_bytes(sample_data),
)
)
elif parameter_place_in_request == ParamTypes.FORM_DATA:
template.params.add(
fuzz_type(name=param_name, value=str(sample_data))
)
else:
self.logger.warning(
f"Can not parse a definition ({parameter_place_in_request}): "
f"{pretty_print(param)}"
)
if template.get_stat() > 0:
self._save_template(template)
def _compile_base_url_for_swagger(self, alternate_url):
if alternate_url:
_base_url = "/".join(
[
alternate_url.strip("/"),
self.api_resources.get("basePath", "").strip("/"),
]
)
else:
if "http" in self.api_resources["schemes"]:
_protocol = "http"
else:
_protocol = self.api_resources["schemes"][0]
_base_url = "{}://{}{}".format(
_protocol, self.api_resources["host"], self.api_resources["basePath"]
)
return _base_url
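    # Rough sketch of _compile_base_url_for_swagger above (hypothetical values):
    # with alternate_url="http://localhost:8080" and basePath="/v2" the result is
    # "http://localhost:8080/v2"; without an alternate URL, schemes=["https"],
    # host="example.com" and basePath="/v2" give "https://example.com/v2".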
def _compile_base_url_for_openapi(self, alternate_url):
if self.api_resources.get("servers"):
uri = urlparse(self.api_resources.get("servers", [])[0].get("url"))
else:
uri = urlparse(alternate_url)
if alternate_url:
_base_url = "/".join([alternate_url.strip("/"), uri.path.strip("/")])
else:
_base_url = self.api_resources.get("servers", [])[0].get("url")
return _base_url
def compile_base_url(self, alternate_url):
"""
:param alternate_url: alternate protocol and base url to be used instead of the one defined in swagger
:type alternate_url: string
"""
if self.api_resources.get("swagger", "").startswith("2"):
_base_url = self._compile_base_url_for_swagger(alternate_url)
self.logger.debug("Using swagger style url: {}".format(_base_url))
elif self.api_resources.get("openapi", "").startswith("3"):
_base_url = self._compile_base_url_for_openapi(alternate_url)
self.logger.debug("Using openapi style url: {}".format(_base_url))
else:
self.logger.warning(
"Failed to find base url, using the alternative one ({})".format(
alternate_url
)
)
_base_url = alternate_url
        return _base_url

# | APIFuzzer | /APIFuzzer-0.9.13-py3-none-any.whl/apifuzzer/openapi_template_generator.py | openapi_template_generator.py |
import argparse
import json
import logging
import os
import sys
from base64 import b64encode
from binascii import Error
from io import BytesIO
from logging import Formatter
from logging.handlers import SysLogHandler
from random import SystemRandom
from typing import Optional
import pycurl
from bitstring import Bits
from apifuzzer.version import get_version
logger_name = "APIFuzzer"
def secure_randint(minimum, maximum):
"""
Provides solution for B311 "Standard pseudo-random generators are not suitable for security/cryptographic purposes."
:param minimum: minimum value
:type minimum: int
:param maximum: maximum value
:type maximum: int
    :return: random integer between minimum (inclusive) and maximum (exclusive)
"""
rand = SystemRandom()
return rand.randrange(start=minimum, stop=maximum)
def set_logger(level="warning", basic_output=False):
"""
Setup logger
:param level: log level
    :type level: str
:param basic_output: If set to True, application logs to the terminal not to Syslog
:type basic_output: bool
:rtype logger
"""
if level.lower() == "debug":
fmt = "%(process)d [%(levelname)7s] %(name)s [%(filename)s:%(lineno)s - %(funcName)20s ]: %(message)s"
else:
fmt = "%(process)d [%(levelname)7s] %(name)s: %(message)s"
logger = logging.getLogger(logger_name)
logger.handlers.clear()
if basic_output:
handler = logging.StreamHandler(stream=sys.stdout)
else:
if os.path.exists("/dev/log"):
handler = SysLogHandler(
address="/dev/log", facility=SysLogHandler.LOG_LOCAL2
)
else:
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(Formatter(fmt))
logger.addHandler(handler)
kitty_logger = logging.getLogger("kitty")
kitty_logger.setLevel(level=logging.getLevelName(level.upper()))
logger.setLevel(level=logging.getLevelName(level.upper()))
logger.propagate = False
return logger
def get_logger(name):
"""
Configure the logger
:param name: name of the new logger
:return: logger object
"""
logger = logging.getLogger(logger_name).getChild(name)
return logger
def transform_data_to_bytes(data_in):
"""
Transform data to bytes
:param data_in: data to transform
:type data_in: str, float, Bits
:rtype: bytearray
"""
if isinstance(data_in, float):
return bytes(int(data_in))
elif isinstance(data_in, str):
return bytes(data_in, "utf-16")
elif isinstance(data_in, Bits):
return data_in.tobytes()
elif isinstance(data_in, list):
return bytes(",".join(data_in), "utf-16")
else:
return bytes(data_in)
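# Rough behaviour of transform_data_to_bytes above (illustrative, not exhaustive):
# strings and lists are encoded as UTF-16 (with a BOM), Bits objects are rendered
# via tobytes(), and a float falls back to bytes(int(x)), which produces a
# zero-filled byte string of that length rather than an encoded number.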
def try_b64encode(data_in):
"""
Encode string to base64
:param data_in: data to transform
:type data_in: str
:rtype str
:return base64 string
"""
try:
return b64encode(data_in)
except (TypeError, Error):
return data_in
def container_name_to_param(container_name):
"""
    Splits the container name and provides the name of the related parameter
:param container_name: container name
:type container_name: str
:return: param
:rtype: str
"""
return container_name.split("|")[-1]
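# Illustrative example of container_name_to_param above (hypothetical name):
# the fuzzer prefixes parameter names with the template name, e.g.
#   >>> container_name_to_param("user+{userId}|get|userId")
#   'userId'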
def init_pycurl(debug=False):
"""
    Provides an instance of pycurl with basic configuration
    :param debug: configures verbosity of the http client
    :type debug: bool
:return: pycurl instance
"""
_curl = pycurl.Curl()
_curl.setopt(pycurl.SSL_OPTIONS, pycurl.SSLVERSION_TLSv1_2)
_curl.setopt(pycurl.SSL_VERIFYPEER, False)
_curl.setopt(pycurl.SSL_VERIFYHOST, False)
_curl.setopt(pycurl.VERBOSE, debug)
_curl.setopt(pycurl.TIMEOUT, 10)
_curl.setopt(pycurl.COOKIEFILE, "")
_curl.setopt(pycurl.USERAGENT, get_version())
return _curl
def download_file(url, dst_file):
"""
Download file from the provided url to the defined file
:param url: url to download from
:type url: str
:param dst_file: name of destination file
:type dst_file: str
:return: None
"""
_curl = init_pycurl()
buffer = BytesIO()
_curl = pycurl.Curl()
_curl.setopt(_curl.URL, url)
_curl.setopt(_curl.WRITEDATA, buffer)
_curl.perform()
_curl.close()
buffer.seek(0)
with open(dst_file, "wb") as tmp_file:
tmp_file.write(buffer.getvalue())
buffer.close()
def get_item(json_dict, json_path):
"""
Get JSON item defined by path
:param json_dict: JSON dict contains the item we are looking for
:type json_dict: dict
:param json_path: defines the place of the object
:type json_path: list
:return: dict
"""
for item in json_path:
json_dict = json_dict.get(item, {})
return json_dict
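# Illustrative example of get_item above (hypothetical document):
#   >>> get_item({"a": {"b": {"c": 1}}}, ["a", "b"])
#   {'c': 1}
# A missing key at any level yields the {} default instead of raising.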
def pretty_print(printable, limit=200):
"""
Format json data for logging
:param printable: json data to dump
:type printable: dict, str
:param limit: this amount of chars will be written
:type limit: int
:return: formatted string
:rtype: str
"""
if isinstance(printable, dict):
return json.dumps(printable, sort_keys=True)[0:limit]
elif isinstance(printable, str):
return printable[:limit]
else:
return printable
def json_data(arg_string: Optional[str]) -> dict:
"""
    Transforms an input string into JSON. The input must be a string representing a dict or a list of dicts
:type arg_string: str
:rtype dict
"""
if isinstance(arg_string, dict) or isinstance(arg_string, list): # support testing
arg_string = json.dumps(arg_string)
try:
_return = json.loads(arg_string)
if hasattr(_return, "append") or hasattr(_return, "keys"):
return _return
else:
raise TypeError("not list or dict")
except (TypeError, json.decoder.JSONDecodeError):
msg = "%s is not JSON", arg_string
raise argparse.ArgumentTypeError(msg)
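# Illustrative example of json_data above (hypothetical CLI argument):
#   >>> json_data('{"X-Auth": "secret"}')
#   {'X-Auth': 'secret'}
# A scalar such as '42' raises argparse.ArgumentTypeError because the decoded
# value is neither a list nor a dict.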
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1", "True", "T"):
return True
elif v.lower() in ("no", "false", "f", "n", "0", "False", "F"):
return False
else:
        raise argparse.ArgumentTypeError("Boolean value expected.")

# | APIFuzzer | /APIFuzzer-0.9.13-py3-none-any.whl/apifuzzer/utils.py | utils.py |
from kitty.core import KittyException
from kitty.model import Static, Template, Container
from apifuzzer.utils import get_logger
class BaseTemplate(object):
def __init__(self, name):
self.logger = get_logger(self.__class__.__name__)
self.name = name
self.content_type = ""
self.method = None
self.url = None
self.params = set()
self.data = set()
self.headers = set()
self.path_variables = set()
self.query = set()
self.cookies = set()
self.field_to_param = {
"params": self.params,
"headers": self.headers,
"data": self.data,
"path_variables": self.path_variables,
"cookies": self.cookies,
"query": self.query,
"content_type": self.content_type,
}
self.place_to_field = {
"path": self.path_variables,
"query": self.query,
"header": self.headers,
"cookie": self.query,
"body": self.data,
}
"""
        Possible parameters from request docs:
:param method: method for the new :class:`Request` object.
:param bytes url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param query: (optional) query strings to send in url of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
"""
def get_stat(self):
total = 0
for field in self.field_to_param.values():
total += len(field)
self.logger.info(f"Template size: {total}, content: {self.field_to_param}")
return total
def compile_template(self):
_url = Static(name="url", value=self.url)
_method = Static(name="method", value=self.method)
template = Template(name=self.name, fields=[_url, _method])
for name, field in self.field_to_param.items():
if list(field):
try:
template.append_fields([Container(name=name, fields=field)])
except KittyException as e:
self.logger.warning(
"Failed to add {} because {}, continue processing...".format(
name, e
)
)
return template
def get_content_type(self):
        return self.content_type

# | APIFuzzer | /APIFuzzer-0.9.13-py3-none-any.whl/apifuzzer/base_template.py | base_template.py |
import json
import tempfile
from ruamel.yaml import YAML
from ruamel.yaml.scanner import ScannerError
from apifuzzer.custom_fuzzers import (
RandomBitsField,
Utf8Chars,
UnicodeStrings,
APIFuzzerGroup,
)
from apifuzzer.exceptions import FailedToParseFileException
from apifuzzer.utils import download_file, secure_randint
def get_sample_data_by_type(param_type):
types = {
"name": "012",
"string": "asd",
"integer": 1,
"number": 667.5,
"boolean": False,
"array": [
1,
2,
3,
], # transform_data_to_bytes complains when this array contains strings.
}
return types.get(param_type, b"\x00")
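# Illustrative example of get_sample_data_by_type above:
#   >>> get_sample_data_by_type("integer")
#   1
#   >>> get_sample_data_by_type("unknown")
#   b'\x00'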
def get_field_type_by_method(http_method):
fields = {"GET": "params", "POST": "data", "PUT": "data"}
return fields.get(http_method, "data")
def get_fuzz_type_by_param_type(fuzz_type):
# https://kitty.readthedocs.io/en/latest/data_model/big_list_of_fields.html#atomic-fields
# https://swagger.io/docs/specification/data-models/data-types/
string_types = [UnicodeStrings, RandomBitsField, Utf8Chars]
number_types = [UnicodeStrings, RandomBitsField]
types = {
"integer": number_types,
"float": number_types,
"double": number_types,
"int32": number_types,
"int64": number_types,
"number": number_types,
"string": string_types,
"email": string_types,
"uuid": string_types,
"uri": string_types,
"hostname": string_types,
"ipv4": string_types,
"ipv6": string_types,
"boolean": string_types,
"enum": [APIFuzzerGroup],
}
fuzzer_list = types.get(fuzz_type, string_types)
return fuzzer_list[secure_randint(0, max(len(fuzzer_list) - 1, 1))]
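# Note on get_fuzz_type_by_param_type above (observation, not original commentary):
# "enum" always resolves to APIFuzzerGroup (single-element list), while other types
# pick a fuzzer via secure_randint. Because randrange excludes its stop value, the
# last entry of a multi-element fuzzer list is never returned by this helper as written.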
def container_name_to_param(container_name):
return container_name.split("|")[-1]
def get_api_definition_from_file(src_file, logger=None):
if logger:
print_func = logger
else:
print_func = print
try:
with open(src_file, mode="rb") as f:
api_definition = f.read()
# try loading as JSON first, then YAML
try:
return json.loads(api_definition.decode("utf-8"))
except ValueError as e:
print_func(
f"Failed to load input ({src_file}) as JSON because ({e}), maybe YAML?"
)
try:
yaml = YAML(typ="safe")
return yaml.load(api_definition.decode("utf-8"))
except (TypeError, ScannerError) as e:
print_func(f"Failed to load input ({src_file}) as YAML:{e}")
raise e
except (Exception, FileNotFoundError) as e:
print_func(f"Failed to parse input file ({src_file}), because: ({e}) exit")
raise FailedToParseFileException
def get_api_definition_from_url(url, temp_file=None, logger=None):
if temp_file is None:
temp_file = tempfile.NamedTemporaryFile().name
download_file(url, temp_file)
return get_api_definition_from_file(temp_file, logger=logger)
def get_base_url_form_api_src(url):
"""
provides base url from api definition source url.
:param url: url like https://example.com/api/v1/api.json
:return: url like https://example.com/api/v1
"""
    split_url = url.split("/")
    return "/".join(split_url[: len(split_url) - 1])

# | APIFuzzer | /APIFuzzer-0.9.13-py3-none-any.whl/apifuzzer/fuzz_utils.py | fuzz_utils.py |
import json
import os
import urllib.parse
from io import BytesIO
from time import time, perf_counter
import pycurl
from bitstring import Bits
from junit_xml import TestSuite, TestCase, to_xml_report_file
from kitty.targets.server import ServerTarget
from apifuzzer.apifuzzerreport import ApifuzzerReport as Report
from apifuzzer.fuzzer_target.request_base_functions import FuzzerTargetBase
from apifuzzer.utils import try_b64encode, init_pycurl, get_logger
class Return:
pass
class FuzzerTarget(FuzzerTargetBase, ServerTarget):
def not_implemented(self, func_name):
_ = func_name
pass
def __init__(self, name, base_url, report_dir, auth_headers, junit_report_path):
super(ServerTarget, self).__init__(name) # pylint: disable=E1003
super(FuzzerTargetBase, self).__init__(auth_headers) # pylint: disable=E1003
self.logger = get_logger(self.__class__.__name__)
self.base_url = base_url
self.accepted_status_codes = list(range(200, 300)) + list(range(400, 500))
self.auth_headers = auth_headers
self.report_dir = report_dir
self.junit_report_path = junit_report_path
self.failed_test = list()
self.logger.info("Logger initialized")
self.resp_headers = dict()
self.transmit_start_test = None
def pre_test(self, test_num):
"""
Called when a test is started
"""
self.test_number = test_num
self.report = Report(self.name)
if self.controller:
self.controller.pre_test(test_number=self.test_number)
for monitor in self.monitors:
monitor.pre_test(test_number=self.test_number)
self.report.add("test_number", test_num)
self.report.add("state", "STARTED")
self.transmit_start_test = perf_counter()
def transmit(self, **kwargs):
"""
Prepares fuzz HTTP request, sends and processes the response
:param kwargs: url, method, params, querystring, etc
:return:
"""
self.logger.debug("Transmit: {}".format(kwargs))
try:
_req_url = list()
for url_part in self.base_url, kwargs["url"]:
if isinstance(url_part, Bits):
url_part = url_part.tobytes()
if isinstance(url_part, bytes):
url_part = url_part.decode()
_req_url.append(url_part.strip("/"))
kwargs.pop("url")
# Replace back the placeholder for '/'
# (this happens in expand_path_variables,
# but if we don't have any path_variables, it won't)
request_url = "/".join(_req_url).replace("+", "/")
query_params = None
if kwargs.get("params") is not None:
self.logger.debug(
("Adding query params: {}".format(kwargs.get("params", {})))
)
query_params = self.format_pycurl_query_param(
request_url, kwargs.get("params", {})
)
kwargs.pop("params")
if kwargs.get("path_variables") is not None:
request_url = self.expand_path_variables(
request_url, kwargs.get("path_variables")
)
kwargs.pop("path_variables")
if kwargs.get("data") is not None:
kwargs["data"] = self.fix_data(kwargs.get("data"))
if query_params is not None:
request_url = "{}{}".format(request_url, query_params)
method = kwargs["method"]
content_type = kwargs.get("content_type")
kwargs.pop("content_type", None)
self.logger.info("Request URL : {} {}".format(method, request_url))
if kwargs.get("data") is not None:
self.logger.info(
"Request data:{}".format(json.dumps(dict(kwargs.get("data"))))
)
if isinstance(method, Bits):
method = method.tobytes()
if isinstance(method, bytes):
method = method.decode()
kwargs.pop("method")
kwargs["headers"] = self.compile_headers(kwargs.get("headers"))
self.logger.debug(
"Request url:{}\nRequest method: {}\nRequest headers: {}\nRequest body: {}".format(
request_url,
method,
json.dumps(dict(kwargs.get("headers", {})), indent=2),
kwargs.get("data"),
)
)
self.report.set_status(Report.PASSED)
self.report.add("request_url", request_url)
self.report.add("request_method", method)
self.report.add(
"request_headers", json.dumps(dict(kwargs.get("headers", {})))
)
try:
resp_buff_hdrs = BytesIO()
resp_buff_body = BytesIO()
buffer = BytesIO()
_curl = init_pycurl()
_curl.setopt(pycurl.URL, self.format_pycurl_url(request_url))
_curl.setopt(pycurl.HEADERFUNCTION, self.header_function)
_curl.setopt(pycurl.POST, len(kwargs.get("data", {}).items()))
_curl.setopt(pycurl.CUSTOMREQUEST, method)
headers = kwargs["headers"]
if content_type:
self.logger.debug(f"Adding Content-Type: {content_type} header")
headers.update({"Content-Type": content_type})
_curl.setopt(pycurl.HTTPHEADER, self.format_pycurl_header(headers))
if content_type == "multipart/form-data":
post_data = list()
for k, v in kwargs.get("data", {}).items():
post_data.append((k, v))
_curl.setopt(pycurl.HTTPPOST, post_data)
elif content_type == "application/json":
_json_data = (
json.dumps(kwargs.get("data", {}), ensure_ascii=True)
.encode("utf-8")
.decode("utf-8", "ignore")
)
_curl.setopt(pycurl.POSTFIELDS, _json_data)
else:
# default content type: application/x-www-form-urlencoded
_curl.setopt(
pycurl.POSTFIELDS,
urllib.parse.urlencode(kwargs.get("data", {})),
)
_curl.setopt(pycurl.HEADERFUNCTION, resp_buff_hdrs.write)
_curl.setopt(pycurl.WRITEFUNCTION, resp_buff_body.write)
for retries in reversed(range(0, 3)):
try:
_curl.perform()
# TODO: Handle this: pycurl.error: (3, 'Illegal characters found in URL')
except pycurl.error as e:
self.logger.warning(f"Failed to send request because of {e}")
except Exception as e:
if retries:
self.logger.error(
"Retrying... ({}) because {}".format(retries, e)
)
else:
raise e
_return = Return()
_return.status_code = _curl.getinfo(pycurl.RESPONSE_CODE)
_return.headers = self.resp_headers
                _return.content = resp_buff_body.getvalue()  # body is written to resp_buff_body, not buffer
_return.request = Return()
_return.request.headers = kwargs.get("headers", {})
_return.request.body = kwargs.get("data", {})
_curl.close()
except Exception as e:
self.logger.exception(e)
self.report.set_status(Report.FAILED)
self.logger.error("Request failed, reason: {}".format(e))
# self.report.add('request_sending_failed', e.msg if hasattr(e, 'msg') else e)
self.report.add("request_method", method)
return
# overwrite request headers in report, add auto generated ones
self.report.add(
"request_headers",
try_b64encode(json.dumps(dict(_return.request.headers))),
)
self.logger.debug(
"Response code:{}\nResponse headers: {}\nResponse body: {}".format(
_return.status_code,
json.dumps(dict(_return.headers), indent=2),
_return.content,
)
)
self.report.add("request_body", _return.request.body)
self.report.add("response", _return.content.decode())
status_code = _return.status_code
if not status_code:
self.logger.warning(f"Failed to parse http response code, continue...")
self.report.set_status(Report.PASSED)
elif status_code not in self.accepted_status_codes:
self.report.add("parsed_status_code", status_code)
                self.report_add_basic_msg(
                    "Return code {} is not in the expected list".format(status_code)
                )
return _return
except (
UnicodeDecodeError,
UnicodeEncodeError,
) as e: # request failure such as InvalidHeader
            self.report_add_basic_msg(
                "Failed to parse http response code, exception occurred: {}".format(e)
            )
def post_test(self, test_num):
"""Called after a test is completed, perform cleanup etc."""
if self.report.get("report") is None:
self.report.add("reason", self.report.get_status())
super(ServerTarget, self).post_test(test_num) # pylint: disable=E1003
if self.report.get_status() != Report.PASSED:
if self.junit_report_path:
test_case = TestCase(
name=self.test_number,
status=self.report.get_status(),
timestamp=time(),
elapsed_sec=perf_counter() - self.transmit_start_test
)
test_case.add_failure_info(message=json.dumps(self.report.to_dict()))
self.failed_test.append(test_case)
self.save_report_to_disc()
def save_report_to_disc(self):
self.logger.info("Report: {}".format(self.report.to_dict()))
try:
if not os.path.exists(os.path.dirname(self.report_dir)):
try:
os.makedirs(os.path.dirname(self.report_dir))
except OSError:
pass
with open(
"{}/{}_{}.json".format(self.report_dir, self.test_number, time()), "w"
) as report_dump_file:
report_dump_file.write(json.dumps(self.report.to_dict()))
except Exception as e:
self.logger.error(
'Failed to save report "{}" to {} because: {}'.format(
self.report.to_dict(), self.report_dir, e
)
)
def report_add_basic_msg(self, msg):
self.report.set_status(Report.FAILED)
self.logger.warning(msg)
self.report.failed(msg)
def teardown(self):
if len(self.failed_test):
test_cases = self.failed_test
else:
test_cases = list()
test_cases.append(TestCase(name="Fuzz test succeed", status="Pass"))
if self.junit_report_path:
with open(self.junit_report_path, "w") as report_file:
to_xml_report_file(
report_file,
[TestSuite(name="API Fuzzer", test_cases=test_cases, timestamp=time())],
prettyprint=True
)
        super(ServerTarget, self).teardown()  # pylint: disable=E1003

# | APIFuzzer | /APIFuzzer-0.9.13-py3-none-any.whl/apifuzzer/fuzzer_target/fuzz_request_sender.py | fuzz_request_sender.py |
import pycurl
import requests
from apifuzzer.fuzz_utils import container_name_to_param
from apifuzzer.utils import get_logger
from apifuzzer.version import get_version
class FuzzerTargetBase:
def __init__(self, auth_headers):
self._last_sent_request = None
self.auth_headers = auth_headers
self.logger = get_logger(self.__class__.__name__)
self.logger.info("Logger initialized")
self.resp_headers = dict()
self.chop_left = True
self.chop_right = True
def compile_headers(self, fuzz_header=None):
"""
        Using the fuzz headers plus the header(s) defined as CLI parameters, this puts together a dict which will be
        used in the request
:type fuzz_header: list, dict, None
"""
_header = requests.utils.default_headers()
_header.update(
{
"User-Agent": get_version(),
}
)
if isinstance(fuzz_header, dict):
for k, v in fuzz_header.items():
fuzz_header_name = container_name_to_param(k)
self.logger.debug(
"Adding fuzz header: {}->{}".format(fuzz_header_name, v)
)
_header[fuzz_header_name] = v
if isinstance(self.auth_headers, list):
for auth_header_part in self.auth_headers:
_header.update(auth_header_part)
else:
_header.update(self.auth_headers)
return _header
def header_function(self, header_line):
header_line = header_line.decode("iso-8859-1")
if ":" not in header_line:
return
name, value = header_line.split(":", 1)
self.resp_headers[name.strip().lower()] = value.strip()
@staticmethod
def dict_to_query_string(query_strings):
"""
Transforms dictionary to query string format
:param query_strings: dictionary
:type query_strings: dict
:return: query string
:rtype: str
"""
_tmp_list = list()
for query_string_key in query_strings.keys():
_tmp_list.append(
"{}={}".format(query_string_key, query_strings[query_string_key])
)
return "?" + "&".join(_tmp_list)
def format_pycurl_query_param(self, url, query_params):
"""
Prepares fuzz query string by removing parts if necessary
:param url: url used only to provide realistic url for pycurl
:type url: str
:param query_params: query strings in dict format
:type query_params: dict
:rtype: str
"""
_dummy_curl = pycurl.Curl()
_tmp_query_params = dict()
for k, v in query_params.items():
original_value = v
iteration = 0
self.chop_left = True
self.chop_right = True
while True:
iteration = iteration + 1
_test_query_params = _tmp_query_params.copy()
_query_param_name = container_name_to_param(k)
_test_query_params[_query_param_name] = v
try:
_dummy_curl.setopt(
pycurl.URL,
"{}{}".format(
url, self.dict_to_query_string(_test_query_params)
),
)
_tmp_query_params[_query_param_name] = v
break
except (UnicodeEncodeError, ValueError) as e:
self.logger.debug(
"{} Problem adding ({}) as query param. Issue was:{}".format(
iteration, k, e
)
)
if len(v):
v = self.chop_fuzz_value(
original_fuzz_value=original_value, fuzz_value=v
)
else:
self.logger.info(
"The whole query param was removed, using empty string instead"
)
_tmp_query_params[_query_param_name] = ""
break
except Exception as e: # pylint: disable=broad-exception
self.logger.error(
"Unexpected exception ({}) while processing: {}".format(e, k)
)
self.logger.warning("Returning: {}".format(_tmp_query_params))
return self.dict_to_query_string(_tmp_query_params)
def format_pycurl_url(self, url):
"""
Prepares fuzz URL for pycurl removing elements if necessary
:param url: URL string prepared earlier
:type url: str
:return: pycurl compliant URL
"""
self.logger.debug("URL to process: %s", url)
_dummy_curl = pycurl.Curl()
url_fields = url.split("/")
_tmp_url_list = list()
for part in url_fields:
self.logger.debug("Processing URL part: {}".format(part))
original_value = part
iteration = 0
self.chop_left = True
self.chop_right = True
while True:
iteration = iteration + 1
try:
_test_list = list()
_test_list = _tmp_url_list[::]
_test_list.append(part)
_dummy_curl.setopt(pycurl.URL, "/".join(_test_list))
self.logger.debug("Adding %s to the url: %s", part, _tmp_url_list)
_tmp_url_list.append(part)
break
except (UnicodeEncodeError, ValueError) as e:
self.logger.debug(
"{} Problem adding ({}) to the url. Issue was:{}".format(
iteration, part, e
)
)
if len(part):
part = self.chop_fuzz_value(
original_fuzz_value=original_value, fuzz_value=part
)
else:
self.logger.info(
"The whole url part was removed, using empty string instead"
)
_tmp_url_list.append("-")
break
_return = "/".join(_tmp_url_list)
self.logger.info("URL to be used: %s", _return)
return _return
def chop_fuzz_value(self, original_fuzz_value, fuzz_value):
"""
Prepares fuzz parameter for pycurl removing elements if necessary
        :param original_fuzz_value: original value of the field
:param fuzz_value: value modified in the previous run
:return: fuzz value after chopping
"""
if self.chop_left:
self.logger.debug(
"Remove first character from value, current length: %s", len(fuzz_value)
)
fuzz_value = fuzz_value[1:]
if len(fuzz_value) == 0:
self.chop_left = False
fuzz_value = original_fuzz_value
elif self.chop_right:
self.logger.debug(
"Remove last character from value, current length: %s", len(fuzz_value)
)
fuzz_value = fuzz_value[:-1]
if len(fuzz_value) == 1:
self.chop_left = False
return fuzz_value
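    # Rough behaviour of chop_fuzz_value above: characters are dropped from the
    # left one at a time until the value would be exhausted, then the original
    # value is restored and characters are dropped from the right, so a value that
    # pycurl rejects is gradually shortened until it is accepted or empty.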
def format_pycurl_header(self, headers):
"""
        Pycurl and other http clients are picky, so this function tries to keep as much of each header value as it can.
        :param headers: http headers
        :return: http headers
        :rtype: list of bytes in "name: value" form
"""
_dummy_curl = pycurl.Curl()
_tmp = dict()
_return = list()
for k, v in headers.items():
original_value = v
iteration = 0
self.chop_left = True
self.chop_right = True
while True:
iteration = iteration + 1
try:
_dummy_curl.setopt(
pycurl.HTTPHEADER, ["{}: {}".format(k, v).encode()]
)
_tmp[k] = v
break
except ValueError as e:
self.logger.debug(
"{} Problem at adding {} to the header. Issue was:{}".format(
iteration, k, e
)
)
if len(v):
v = self.chop_fuzz_value(
original_fuzz_value=original_value, fuzz_value=v
)
else:
self.logger.info(
"The whole header value was removed, using empty string instead"
)
_tmp[k] = ""
break
for k, v in _tmp.items():
_return.append("{}: {}".format(k, v).encode())
return _return
def expand_path_variables(self, url, path_parameters):
if not isinstance(path_parameters, dict):
self.logger.warning(
"Path_parameters {} does not in the desired format,received: {}".format(
path_parameters, type(path_parameters)
)
)
return url
formatted_url = url
for path_key, path_value in path_parameters.items():
self.logger.debug(
"Processing: path_key: {} , path_variable: {}".format(
path_key, path_value
)
)
path_parameter = container_name_to_param(path_key)
url_path_parameter = "{%PATH_PARAM%}".replace(
"%PATH_PARAM%", path_parameter
)
tmp_url = formatted_url.replace(url_path_parameter, path_value)
if tmp_url == formatted_url:
self.logger.warning(
"{} was not in the url: {}, adding it".format(
url_path_parameter, url
)
)
tmp_url += "&{}={}".format(path_parameter, path_value)
formatted_url = tmp_url
self.logger.debug("Compiled url in {}, out: {}".format(url, formatted_url))
return formatted_url.replace("{", "").replace("}", "").replace("+", "/")
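    # Illustrative example of expand_path_variables above (hypothetical values):
    # for url "user+{userId}" and path_parameters {"user+{userId}|get|userId": "42"}
    # the "{userId}" placeholder is replaced and the normalised '+' is turned back
    # into '/', giving "user/42".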
@staticmethod
def fix_data(data):
new_data = {}
for data_key, data_value in data.items():
new_data[container_name_to_param(data_key)] = data_value
        return new_data

# | APIFuzzer | /APIFuzzer-0.9.13-py3-none-any.whl/apifuzzer/fuzzer_target/request_base_functions.py | request_base_functions.py |
import sys
import logging
import click
import platform
from apigtool.utility import work_on_apigs
from apigtool.utility import list_apigs
from apigtool.auth_state import set_iam_auth
logging.basicConfig(
level=logging.INFO,
stream=sys.stderr,
format='[%(levelname)s] %(asctime)s (%(module)s) %(message)s',
datefmt='%Y/%m/%d-%H:%M:%S'
)
valid_systems = [
'linux',
'darwin'
]
@click.group()
@click.version_option(version='0.3.0')
def cli():
pass
@cli.command()
@click.option('--apig', '-a', help='APIG of interest', required=True)
@click.option('--stage', '-s', help='APIG stage of interest', required=True)
@click.option('--profile', '-p', help='credential profile')
@click.option('--region', '-r', help='AWS region')
@click.option('--on', '-o', help='on: true | false')
def log(apig, stage, profile, region, on):
'''
Work on logging for an APIG
'''
work_on_apigs(
apig=apig,
stage=stage,
profile=profile,
region=region,
on=on
)
@cli.command()
@click.option('--profile', '-p', help='credential profile')
@click.option('--region', '-r', help='AWS region')
def list(profile, region):
'''
    Work on listing the APIs
'''
list_apigs(
profile=profile,
region=region
)
@cli.command()
@click.option('--profile', help='credential profile')
@click.option('--region', help='AWS region')
@click.option('--api-name', '-a', help='name of the API of interest', required=True)
@click.option('--stage', '-s', help='deployment stage', required=True)
@click.option('--path', '-p', help='resource path', required=True)
@click.option('--on', '-o', help='on: true | false', required=True)
@click.option('--method', '-m', help='HTTP method on the resource')
def authiam(**kwargs):
'''
Turn on IAM authorization
'''
set_iam_auth(**kwargs)
def verify_real_system():
try:
current_system = platform.system().lower()
return current_system in valid_systems
    except Exception:
return False
if not verify_real_system():
print('error: unsupported system')
    sys.exit(1)

# | APIGtool | /APIGtool-0.3.0.tar.gz/APIGtool-0.3.0/apigtool/command.py | command.py |
import sys
import json
import logging
import boto3
from apigtool.utility import date_converter
logger = logging.getLogger()
logger.setLevel(logging.INFO)
default_region = 'us-east-1'
'''
{ "patchOperations" : [
    {
        "op" : "replace",
        "path" : "/*/*/logging/loglevel",
        "value" : "INFO"
    }
] }
'''
def set_iam_auth(**kwargs):
on_switch = False
api_name = kwargs.get('api_name', None)
stage = kwargs.get('stage', None)
path = kwargs.get('path', None)
profile = kwargs.get('profile', None)
region = kwargs.get('region', None)
on = kwargs.get('on', None)
method = kwargs.get('method', None)
if on:
if on.lower() == 'true':
on_switch = True
elif on.lower() == 'false':
on_switch = False
else:
            logger.error(f'on switch must be true or false, given {on=}')
sys.exit(2)
if method is None:
method = 'ANY'
logger.info(' api_name: {}'.format(api_name))
logger.info(' stage: {}'.format(stage))
logger.info(' path: {}'.format(path))
logger.info('on_switch: {}'.format(on_switch))
logger.info(' profile: {}'.format(profile))
logger.info(' region: {}'.format(region))
clients = _init_boto3_clients(
['apigateway'],
profile,
region
)
if clients is None:
logger.error('failed to create clients')
return
api_id = find_api(api_name, stage, clients.get('apigateway'))
logger.info(f'{api_id=}')
resource_id = find_resource(api_id, path, clients.get('apigateway'))
logger.info(f'{resource_id=}')
current_state = get_current_state(api_id, resource_id, method, clients.get('apigateway'))
if current_state == on_switch:
logger.info('no change needed')
return True
else:
logger.info('change needed')
return (
set_current_state(
api_id,
resource_id,
stage,
method,
on_switch,
clients.get('apigateway'))
)
def set_current_state(api_id, resource_id, stage, http_method, on_switch, apig_client):
try:
if on_switch:
auth_type = 'AWS_IAM'
else:
auth_type = 'NONE'
response = apig_client.update_method(
restApiId=api_id,
resourceId=resource_id,
httpMethod=http_method,
patchOperations=[
{
'op': 'replace',
'path': '/authorizationType',
'value': auth_type
}
]
)
logger.info('put_method() response:')
logger.info(json.dumps(response, indent=2, default=date_converter))
response = apig_client.create_deployment(restApiId=api_id, stageName=stage)
logger.info('create_deployment() response:')
logger.info(json.dumps(response, indent=2, default=date_converter))
return False
except Exception as ruh_roh:
logger.error(ruh_roh, exc_info=True)
return False
def get_current_state(api_id, resource_id, http_method, apig_client):
current_state = False
try:
response = apig_client.get_method(
restApiId=api_id,
resourceId=resource_id,
httpMethod=http_method
)
current_state = response.get('authorizationType', '42') == 'AWS_IAM'
logger.debug(json.dumps(response, indent=2, default=date_converter))
logger.info(f'current authorizationType is AWS_IAM: {current_state}')
except Exception as ruh_roh:
logger.error(ruh_roh, exc_info=True)
return current_state
def find_resource(api_id, path, apig_client):
current_position = '__first___'
try:
while current_position:
if current_position == '__first___':
response = apig_client.get_resources(restApiId=api_id)
else:
response = apig_client.get_resources(restApiId=api_id, position=current_position)
current_position = response.get('position', None)
for resource in response.get('items', []):
candidate_path = resource.get('path', 'unknown')
resource_id = resource.get('id', 'unknown')
if candidate_path == path:
return resource_id
except Exception as ruh_roh:
logger.error(ruh_roh, exc_info=True)
logger.error('could not find resource Id, exiting')
sys.exit(1)
def find_api(api_name, stage, apig_client):
current_position = '__first___'
try:
while current_position:
if current_position == '__first___':
response = apig_client.get_rest_apis()
else:
response = apig_client.get_rest_apis(position=current_position)
current_position = response.get('position', None)
for apig in response.get('items', []):
name = apig.get('name', 'unknown')
api_id = apig.get('id', 'unknown')
if name == api_name:
# we found it
r = apig_client.get_stages(restApiId=api_id)
logger.debug(json.dumps(r, indent=2, default=date_converter))
stages = [stage['stageName'] for stage in r.get('item')]
if stage in stages:
return api_id
except Exception as ruh_roh:
logger.error(ruh_roh, exc_info=True)
logger.error('could not find API Id, exiting')
sys.exit(1)
def _init_boto3_clients(services, profile, region):
"""
Creates boto3 clients
Args:
profile - CLI profile to use
region - where do you want the clients
Returns:
        dict of boto3 clients keyed by service name, or None on failure
"""
try:
if not region:
region = default_region
clients = {}
session = None
if profile and region:
session = boto3.session.Session(profile_name=profile, region_name=region)
elif profile:
session = boto3.session.Session(profile_name=profile)
elif region:
session = boto3.session.Session(region_name=region)
else:
session = boto3.session.Session()
for svc in services:
clients[svc] = session.client(svc)
logger.info('client for %s created', svc)
return clients
except Exception as wtf:
logger.error(wtf, exc_info=True)
        return None

# | APIGtool | /APIGtool-0.3.0.tar.gz/APIGtool-0.3.0/apigtool/auth_state.py | auth_state.py |
import json
import logging
from bson import json_util
import boto3
import datetime
from tabulate import tabulate
logger = logging.getLogger()
logger.setLevel(logging.INFO)
default_region = 'us-east-1'
'''
{ "patchOperations" : [
    {
        "op" : "replace",
        "path" : "/*/*/logging/loglevel",
        "value" : "INFO"
    }
] }
'''
def date_converter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
return None
def work_on_apigs(**kwargs):
change_state = False
on_switch = False
apig = kwargs.get('apig', None)
stage = kwargs.get('stage', None)
profile = kwargs.get('profile', None)
region = kwargs.get('region', None)
on = kwargs.get('on', None)
if on:
if on.lower() == 'true':
on_switch = True
change_state = True
elif on.lower() == 'false':
on_switch = False
change_state = True
logger.info('apig: {}'.format(apig))
logger.info('stage: {}'.format(stage))
logger.info('profile: {}'.format(profile))
logger.info('region: {}'.format(region))
logger.info('on_switch: {}'.format(on_switch))
logger.info('change_state: {}'.format(change_state))
clients = _init_boto3_clients(
['apigateway'],
profile,
region
)
if change_state:
_change_log_state(
apig,
stage,
on_switch,
clients.get('apigateway')
)
print('\nCloudWatch logs found in: API-Gateway-Execution-Logs_{}/{}'.format(apig, stage))
def list_apigs(**kwargs):
current_position = '__first___'
try:
profile = kwargs.get('profile', None)
region = kwargs.get('region', None)
clients = _init_boto3_clients(
['apigateway'],
profile,
region
)
rows = []
while current_position:
if current_position == '__first___':
response = clients.get('apigateway').get_rest_apis()
else:
response = clients.get('apigateway').get_rest_apis(position=current_position)
current_position = response.get('position', None)
for apig in response.get('items', []):
name = apig.get('name', 'unknown')
app_id = apig.get('id', 'unknown')
r = clients.get('apigateway').get_stages(restApiId=app_id)
stages = [stage['stageName'] for stage in r.get('item')]
row = [name, app_id, json.dumps(stages)]
rows.append(row)
# print('{}({}): {}'.format(name, app_id, json.dumps(stages)))
print(tabulate(rows, headers=['API Name', 'Id', 'Stages']))
except Exception as ruh_roh:
logger.error(ruh_roh, exc_info=True)
return False
def _change_log_state(apig, stage, on_switch, client):
try:
if on_switch:
response = client.update_stage(
restApiId=apig,
stageName=stage,
patchOperations=[
{
"op": "replace",
"path": "/*/*/logging/loglevel",
"value": "INFO"
},
{
"op": "replace",
"path": "/*/*/metrics/enabled",
"value": 'true'
},
{
"op": "replace",
"path": "/*/*/logging/dataTrace",
"value": 'true'
}
]
)
else:
response = client.update_stage(
restApiId=apig,
stageName=stage,
patchOperations=[
{
"op": "replace",
"path": "/*/*/logging/loglevel",
"value": "OFF"
},
{
"op": "replace",
"path": "/*/*/metrics/enabled",
"value": 'false'
},
{
"op": "replace",
"path": "/*/*/logging/dataTrace",
"value": 'false'
}
]
)
logger.info(json.dumps(
response,
default=json_util.default,
indent=2
))
return True
except Exception as ruh_roh:
logger.error(ruh_roh, exc_info=False)
return False
def _init_boto3_clients(services, profile, region):
"""
Creates boto3 clients
Args:
profile - CLI profile to use
region - where do you want the clients
Returns:
        dict of boto3 clients keyed by service name, or None on failure
"""
try:
if not region:
region = default_region
clients = {}
session = None
if profile and region:
session = boto3.session.Session(profile_name=profile, region_name=region)
elif profile:
session = boto3.session.Session(profile_name=profile)
elif region:
session = boto3.session.Session(region_name=region)
else:
session = boto3.session.Session()
for svc in services:
clients[svc] = session.client(svc)
logger.info('client for %s created', svc)
return clients
except Exception as wtf:
logger.error(wtf, exc_info=True)
        return None

# | APIGtool | /APIGtool-0.3.0.tar.gz/APIGtool-0.3.0/apigtool/utility.py | utility.py |
import re
import sys
import datetime
import calendar
import email.utils as eut
from time import mktime
import jsonpickle
import dateutil.parser
from requests.utils import quote
class APIHelper(object):
"""A Helper Class for various functions associated with API Calls.
This class contains static methods for operations that need to be
performed during API requests. All of the methods inside this class are
static methods, there is no need to ever initialise an instance of this
class.
"""
@staticmethod
def merge_dicts(dict1, dict2):
"""Merges two dictionaries into one as a shallow copy.
Args:
dict1 (dict): The first dictionary.
dict2 (dict): The second dictionary.
Returns:
dict: A dictionary containing key value pairs
from both the argument dictionaries. In the case
of a key conflict, values from dict2 are used
and those from dict1 are lost.
"""
temp = dict1.copy()
temp.update(dict2)
return temp
@staticmethod
def json_serialize(obj):
"""JSON Serialization of a given object.
Args:
obj (object): The object to serialize.
Returns:
str: The JSON serialized string of the object.
"""
if obj is None:
return None
        # Resolve field names if this is one of our model objects
if isinstance(obj, list):
value = list()
for item in obj:
if hasattr(item, "_names"):
value.append(APIHelper.to_dictionary(item))
else:
value.append(item)
obj = value
else:
if hasattr(obj, "_names"):
obj = APIHelper.to_dictionary(obj)
return jsonpickle.encode(obj, False)
@staticmethod
def json_deserialize(json, unboxing_function=None, as_dict=False):
"""JSON Deserialization of a given string.
Args:
json (str): The JSON serialized string to deserialize.
Returns:
dict: A dictionary representing the data contained in the
JSON serialized string.
"""
if json is None:
return None
try:
decoded = jsonpickle.decode(json)
except ValueError:
return json
if unboxing_function is None:
return decoded
if as_dict:
return {k: unboxing_function(v) for k, v in decoded.items()}
elif isinstance(decoded, list):
return [unboxing_function(element) for element in decoded]
else:
return unboxing_function(decoded)
@staticmethod
def get_content_type(value):
"""Get content type header for oneof.
Args:
value: The value passed by the user.
Returns:
dict: A dictionary representing the data contained in the
JSON serialized string.
"""
if value is None:
return None
primitive = (int, str, bool, float)
if type(value) in primitive:
return 'text/plain; charset=utf-8'
else:
return 'application/json; charset=utf-8'
@staticmethod
def get_schema_path(path):
"""Return the Schema's path
Returns:
string : returns Correct schema path
"""
path = path.replace('\\models', '\\schemas').replace('/models', '/schemas').replace(".py", ".json")
return path
@staticmethod
def serialize_array(key, array, formatting="indexed", is_query=False):
"""Converts an array parameter to a list of key value tuples.
Args:
key (str): The name of the parameter.
array (list): The value of the parameter.
formatting (str): The type of key formatting expected.
is_query (bool): Decides if the parameters are for query or form.
Returns:
list: A list with key value tuples for the array elements.
"""
tuples = []
if sys.version_info[0] < 3:
serializable_types = (str, int, long, float, bool, datetime.date, APIHelper.CustomDate)
else:
serializable_types = (str, int, float, bool, datetime.date, APIHelper.CustomDate)
if isinstance(array[0], serializable_types):
if formatting == "unindexed":
tuples += [("{0}[]".format(key), element) for element in array]
elif formatting == "indexed":
tuples += [("{0}[{1}]".format(key, index), element) for index, element in enumerate(array)]
elif formatting == "plain":
tuples += [(key, element) for element in array]
elif is_query:
if formatting == "csv":
tuples += [(key, ",".join(str(x) for x in array))]
elif formatting == "psv":
tuples += [(key, "|".join(str(x) for x in array))]
elif formatting == "tsv":
tuples += [(key, "\t".join(str(x) for x in array))]
else:
raise ValueError("Invalid format provided.")
else:
tuples += [("{0}[{1}]".format(key, index), element) for index, element in enumerate(array)]
return tuples
@staticmethod
def append_url_with_template_parameters(url, parameters):
"""Replaces template parameters in the given url.
Args:
url (str): The query url string to replace the template parameters.
parameters (dict): The parameters to replace in the url.
Returns:
str: URL with replaced parameters.
"""
# Parameter validation
if url is None:
raise ValueError("URL is None.")
if parameters is None:
return url
# Iterate and replace parameters
for key in parameters:
value = parameters[key]['value']
encode = parameters[key]['encode']
replace_value = ''
# Load parameter value
if value is None:
replace_value = ''
elif isinstance(value, list):
replace_value = "/".join((quote(str(x), safe='') if encode else str(x)) for x in value)
else:
replace_value = quote(str(value), safe='') if encode else str(value)
url = url.replace('{{{0}}}'.format(key), str(replace_value))
return url
@staticmethod
def append_url_with_query_parameters(url,
parameters,
array_serialization="indexed"):
"""Adds query parameters to a URL.
Args:
url (str): The URL string.
parameters (dict): The query parameters to add to the URL.
array_serialization (str): The format of array parameter serialization.
Returns:
str: URL with added query parameters.
"""
# Parameter validation
if url is None:
raise ValueError("URL is None.")
if parameters is None:
return url
parameters = APIHelper.process_complex_types_parameters(parameters, array_serialization)
for index, value in enumerate(parameters):
key = value[0]
val = value[1]
seperator = '&' if '?' in url else '?'
if value is not None:
url += "{0}{1}={2}".format(seperator, key, quote(str(val), safe=''))
return url
@staticmethod
def process_complex_types_parameters(query_parameters, array_serialization):
processed_params = []
for key, value in query_parameters.items():
processed_params.extend(
APIHelper.form_encode(value, key, array_serialization=array_serialization, is_query=True))
return processed_params
@staticmethod
def clean_url(url):
"""Validates and processes the given query Url to clean empty slashes.
Args:
url (str): The given query Url to process.
Returns:
str: Clean Url as string.
"""
# Ensure that the urls are absolute
regex = "^https?://[^/]+"
match = re.match(regex, url)
if match is None:
raise ValueError('Invalid Url format.')
protocol = match.group(0)
index = url.find('?')
query_url = url[len(protocol): index if index != -1 else None]
query_url = re.sub("//+", "/", query_url)
parameters = url[index:] if index != -1 else ""
return protocol + query_url + parameters
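    # Illustrative sketch with a hypothetical URL: duplicate slashes in the path
    # are collapsed while the protocol and query string are preserved, e.g.
    # 'https://example.com//apps//calculator?x=1' becomes
    # 'https://example.com/apps/calculator?x=1'.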
@staticmethod
def form_encode_parameters(form_parameters,
array_serialization="indexed"):
"""Form encodes a dictionary of form parameters
Args:
form_parameters (dictionary): The given dictionary which has
                at least one model to form encode.
array_serialization (str): The format of array parameter serialization.
Returns:
dict: A dictionary of form encoded properties of the model.
"""
encoded = []
for key, value in form_parameters.items():
encoded += APIHelper.form_encode(value, key, array_serialization)
return encoded
@staticmethod
def form_encode(obj,
instance_name,
array_serialization="indexed", is_query=False):
"""Encodes a model in a form-encoded manner such as person[Name]
Args:
obj (object): The given Object to form encode.
instance_name (string): The base name to appear before each entry
for this object.
array_serialization (string): The format of array parameter serialization.
is_query (bool): Decides if the parameters are for query or form.
Returns:
dict: A dictionary of form encoded properties of the model.
"""
retval = []
        # If we received an object, resolve its field names.
if hasattr(obj, "_names"):
obj = APIHelper.to_dictionary(obj)
if obj is None:
return []
elif isinstance(obj, list):
for element in APIHelper.serialize_array(instance_name, obj, array_serialization, is_query):
retval += APIHelper.form_encode(element[1], element[0], array_serialization, is_query)
elif isinstance(obj, dict):
for item in obj:
retval += APIHelper.form_encode(obj[item], instance_name + "[" + item + "]", array_serialization, is_query)
else:
retval.append((instance_name, obj))
return retval
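    # Illustrative sketch with hypothetical values: form encoding the dictionary
    # {'name': 'Ada', 'tags': ['a', 'b']} under the instance name 'person' with
    # the default "indexed" serialization yields
    # [('person[name]', 'Ada'), ('person[tags][0]', 'a'), ('person[tags][1]', 'b')].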
@staticmethod
def to_dictionary(obj):
"""Creates a dictionary representation of a class instance. The
keys are taken from the API description and may differ from language
specific variable names of properties.
Args:
obj: The object to be converted into a dictionary.
Returns:
dictionary: A dictionary form of the model with properties in
their API formats.
"""
dictionary = dict()
# Loop through all properties in this model
for name in obj._names:
value = getattr(obj, name)
if isinstance(value, list):
# Loop through each item
dictionary[obj._names[name]] = list()
for item in value:
dictionary[obj._names[name]].append(APIHelper.to_dictionary(item) if hasattr(item, "_names") else item)
elif isinstance(value, dict):
# Loop through each item
dictionary[obj._names[name]] = dict()
for key in value:
dictionary[obj._names[name]][key] = APIHelper.to_dictionary(value[key]) if hasattr(value[key], "_names") else value[key]
else:
dictionary[obj._names[name]] = APIHelper.to_dictionary(value) if hasattr(value, "_names") else value
# Return the result
return dictionary
@staticmethod
def when_defined(func, value):
return func(value) if value else None
class CustomDate(object):
""" A base class for wrapper classes of datetime.
This class contains methods which help in
appropriate serialization of datetime objects.
"""
def __init__(self, dtime, value=None):
self.datetime = dtime
if not value:
self.value = self.from_datetime(dtime)
else:
self.value = value
def __repr__(self):
return str(self.value)
def __getstate__(self):
return self.value
def __setstate__(self, state):
pass
class HttpDateTime(CustomDate):
""" A wrapper class for datetime to support HTTP date format."""
@classmethod
def from_datetime(cls, date_time):
return eut.formatdate(timeval=mktime(date_time.timetuple()),
localtime=False, usegmt=True)
@classmethod
def from_value(cls, value):
dtime = datetime.datetime.fromtimestamp(eut.mktime_tz(eut.parsedate_tz(value)))
return cls(dtime, value)
class UnixDateTime(CustomDate):
""" A wrapper class for datetime to support Unix date format."""
@classmethod
def from_datetime(cls, date_time):
return calendar.timegm(date_time.utctimetuple())
@classmethod
def from_value(cls, value):
dtime = datetime.datetime.utcfromtimestamp(float(value))
return cls(dtime, float(value))
class RFC3339DateTime(CustomDate):
""" A wrapper class for datetime to support Rfc 3339 format."""
@classmethod
def from_datetime(cls, date_time):
return date_time.isoformat()
@classmethod
def from_value(cls, value):
dtime = dateutil.parser.parse(value)
return cls(dtime, value) | APIMATIC-Caculator | /APIMATIC%20Caculator-1.0.tar.gz/APIMATIC Caculator-1.0/apimaticcalculator/api_helper.py | api_helper.py |
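# Illustrative sketch with a hypothetical datetime: each wrapper above serializes
# a datetime into the representation its format expects, and str()/repr() of the
# wrapper returns that serialized value, e.g.
#   APIHelper.UnixDateTime(datetime.datetime(2023, 1, 1))    -> 1672531200
#   APIHelper.RFC3339DateTime(datetime.datetime(2023, 1, 1)) -> '2023-01-01T00:00:00'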
from enum import Enum
from apimaticcalculator.http.requests_client import RequestsClient
class Environment(Enum):
"""An enum for SDK environments"""
    # This environment connects to the LIVE calculator API
PRODUCTION = 0
class Server(Enum):
"""An enum for API servers"""
CALCULATOR = 0
class Configuration(object):
"""A class used for configuring the SDK by a user.
"""
@property
def http_client(self):
return self._http_client
@property
def http_client_instance(self):
return self._http_client_instance
@property
def override_http_client_configuration(self):
return self._override_http_client_configuration
@property
def timeout(self):
return self._timeout
@property
def max_retries(self):
return self._max_retries
@property
def backoff_factor(self):
return self._backoff_factor
@property
def retry_statuses(self):
return self._retry_statuses
@property
def retry_methods(self):
return self._retry_methods
@property
def environment(self):
return self._environment
def __init__(
self, http_client_instance=None,
override_http_client_configuration=False, timeout=60, max_retries=0,
backoff_factor=2,
retry_statuses=[408, 413, 429, 500, 502, 503, 504, 521, 522, 524],
retry_methods=['GET', 'PUT'], environment=Environment.PRODUCTION
):
# The Http Client passed from the sdk user for making requests
self._http_client_instance = http_client_instance
# The value which determines to override properties of the passed Http Client from the sdk user
self._override_http_client_configuration = override_http_client_configuration
# The value to use for connection timeout
self._timeout = timeout
# The number of times to retry an endpoint call if it fails
self._max_retries = max_retries
# A backoff factor to apply between attempts after the second try.
# urllib3 will sleep for:
# `{backoff factor} * (2 ** ({number of total retries} - 1))`
self._backoff_factor = backoff_factor
# The http statuses on which retry is to be done
self._retry_statuses = retry_statuses
# The http methods on which retry is to be done
self._retry_methods = retry_methods
# Current API environment
self._environment = environment
# The Http Client to use for making requests.
self._http_client = self.create_http_client()
def clone_with(self, http_client_instance=None,
override_http_client_configuration=None, timeout=None,
max_retries=None, backoff_factor=None, retry_statuses=None,
retry_methods=None, environment=None):
http_client_instance = http_client_instance or self.http_client_instance
override_http_client_configuration = override_http_client_configuration or self.override_http_client_configuration
timeout = timeout or self.timeout
max_retries = max_retries or self.max_retries
backoff_factor = backoff_factor or self.backoff_factor
retry_statuses = retry_statuses or self.retry_statuses
retry_methods = retry_methods or self.retry_methods
environment = environment or self.environment
return Configuration(
http_client_instance=http_client_instance,
override_http_client_configuration=override_http_client_configuration,
timeout=timeout, max_retries=max_retries,
backoff_factor=backoff_factor, retry_statuses=retry_statuses,
retry_methods=retry_methods, environment=environment
)
def create_http_client(self):
return RequestsClient(
timeout=self.timeout, max_retries=self.max_retries,
backoff_factor=self.backoff_factor, retry_statuses=self.retry_statuses,
retry_methods=self.retry_methods,
http_client_instance=self.http_client_instance,
override_http_client_configuration=self.override_http_client_configuration
)
# All the environments the SDK can run in
environments = {
Environment.PRODUCTION: {
Server.CALCULATOR: 'https://examples.apimatic.io/apps/calculator'
}
}
def get_base_uri(self, server=Server.CALCULATOR):
"""Generates the appropriate base URI for the environment and the
server.
Args:
server (Configuration.Server): The server enum for which the base
URI is required.
Returns:
String: The base URI.
"""
return self.environments[self.environment][server] | APIMATIC-Caculator | /APIMATIC%20Caculator-1.0.tar.gz/APIMATIC Caculator-1.0/apimaticcalculator/configuration.py | configuration.py |
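# Illustrative usage sketch with hypothetical settings: build a configuration,
# resolve the base URI and derive a modified copy.
#
#   config = Configuration(timeout=30, max_retries=2)
#   base_uri = config.get_base_uri()          # URL of the CALCULATOR server
#   tighter = config.clone_with(timeout=10)   # copy with one setting overridden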
from cachecontrol import CacheControl
from requests import session
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from apimaticcalculator.http.http_client import HttpClient
from apimaticcalculator.http.http_method_enum import HttpMethodEnum
from apimaticcalculator.http.http_response import HttpResponse
class RequestsClient(HttpClient):
"""An implementation of HttpClient that uses Requests as its HTTP Client
Attributes:
timeout (int): The default timeout for all API requests.
"""
def __init__(self,
timeout=60,
cache=False,
max_retries=None,
backoff_factor=None,
retry_statuses=None,
retry_methods=None,
verify=True,
http_client_instance=None,
override_http_client_configuration=False):
"""The constructor.
Args:
timeout (float): The default global timeout(seconds).
"""
        if http_client_instance is None:
            self.create_default_http_client(timeout, cache, max_retries,
                                            backoff_factor, retry_statuses,
                                            retry_methods, verify)
        else:
            if override_http_client_configuration:
http_client_instance.timeout = timeout
http_client_instance.session.verify = verify
adapters = http_client_instance.session.adapters
for adapter in adapters.values():
adapter.max_retries.total = max_retries
adapter.max_retries.backoff_factor = backoff_factor
adapter.max_retries.status_forcelist = retry_statuses
adapter.max_retries.allowed_methods = retry_methods
self.timeout = http_client_instance.timeout
self.session = http_client_instance.session
    def create_default_http_client(self,
timeout=60,
cache=False,
max_retries=None,
backoff_factor=None,
retry_statuses=None,
retry_methods=None,
verify=True):
self.timeout = timeout
self.session = session()
retries = Retry(total=max_retries, backoff_factor=backoff_factor,
status_forcelist=retry_statuses, allowed_methods=retry_methods)
self.session.mount('http://', HTTPAdapter(max_retries=retries))
self.session.mount('https://', HTTPAdapter(max_retries=retries))
if cache:
self.session = CacheControl(self.session)
self.session.verify = verify
def execute_as_string(self, request):
"""Execute a given HttpRequest to get a string response back
Args:
request (HttpRequest): The given HttpRequest to execute.
Returns:
HttpResponse: The response of the HttpRequest.
"""
response = self.session.request(
HttpMethodEnum.to_string(request.http_method),
request.query_url,
headers=request.headers,
params=request.query_parameters,
data=request.parameters,
files=request.files,
timeout=self.timeout
)
return self.convert_response(response, False, request)
def execute_as_binary(self, request):
"""Execute a given HttpRequest to get a binary response back
Args:
request (HttpRequest): The given HttpRequest to execute.
Returns:
HttpResponse: The response of the HttpRequest.
"""
response = self.session.request(
HttpMethodEnum.to_string(request.http_method),
request.query_url,
headers=request.headers,
params=request.query_parameters,
data=request.parameters,
files=request.files,
timeout=self.timeout
)
return self.convert_response(response, True, request)
def convert_response(self, response, binary, http_request):
"""Converts the Response object of the HttpClient into an
HttpResponse object.
Args:
response (dynamic): The original response object.
http_request (HttpRequest): The original HttpRequest object.
Returns:
HttpResponse: The converted HttpResponse object.
"""
if binary:
return HttpResponse(
response.status_code,
response.reason,
response.headers,
response.content,
http_request
)
else:
return HttpResponse(
response.status_code,
response.reason,
response.headers,
response.text,
http_request
) | APIMATIC-Caculator | /APIMATIC%20Caculator-1.0.tar.gz/APIMATIC Caculator-1.0/apimaticcalculator/http/requests_client.py | requests_client.py |
from apimaticcalculator.api_helper import APIHelper
class HttpRequest(object):
"""Information about an HTTP Request including its method, headers,
parameters, URL, and Basic Auth details
Attributes:
http_method (HttpMethodEnum): The HTTP Method that this request should
perform when called.
headers (dict): A dictionary of headers (key : value) that should be
sent along with the request.
query_url (string): The URL that the request should be sent to.
parameters (dict): A dictionary of parameters that are to be sent along
with the request in the form body of the request
"""
def __init__(self,
http_method,
query_url,
headers=None,
query_parameters=None,
parameters=None,
files=None):
"""Constructor for the HttpRequest class
Args:
http_method (HttpMethodEnum): The HTTP Method.
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
parameters (dict, optional): Form or body parameters to be included
in the body.
files (dict, optional): Files to be sent with the request.
"""
self.http_method = http_method
self.query_url = query_url
self.headers = headers
self.query_parameters = query_parameters
self.parameters = parameters
self.files = files
def add_header(self, name, value):
""" Add a header to the HttpRequest.
Args:
name (string): The name of the header.
value (string): The value of the header.
"""
self.headers[name] = value
def add_parameter(self, name, value):
""" Add a parameter to the HttpRequest.
Args:
name (string): The name of the parameter.
value (string): The value of the parameter.
"""
self.parameters[name] = value
def add_query_parameter(self, name, value):
""" Add a query parameter to the HttpRequest.
Args:
name (string): The name of the query parameter.
value (string): The value of the query parameter.
"""
self.query_url = APIHelper.append_url_with_query_parameters(
self.query_url,
{name: value}
)
self.query_url = APIHelper.clean_url(self.query_url) | APIMATIC-Caculator | /APIMATIC%20Caculator-1.0.tar.gz/APIMATIC Caculator-1.0/apimaticcalculator/http/http_request.py | http_request.py |
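# Illustrative sketch with hypothetical values: add_query_parameter re-runs the
# URL through APIHelper, so repeated calls keep appending parameters, e.g.
#   request = HttpRequest(HttpMethodEnum.GET, 'https://example.com/items', headers={})
#   request.add_query_parameter('page', 2)
#   # request.query_url is now 'https://example.com/items?page=2'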
from apimaticcalculator.http.http_method_enum import HttpMethodEnum
from apimaticcalculator.http.http_request import HttpRequest
class HttpClient(object):
"""An interface for the methods that an HTTP Client must implement
This class should not be instantiated but should be used as a base class
for HTTP Client classes.
"""
def execute_as_string(self, request):
"""Execute a given HttpRequest to get a string response back
Args:
request (HttpRequest): The given HttpRequest to execute.
Returns:
HttpResponse: The response of the HttpRequest.
"""
raise NotImplementedError("Please Implement this method")
def execute_as_binary(self, request):
"""Execute a given HttpRequest to get a binary response back
Args:
request (HttpRequest): The given HttpRequest to execute.
Returns:
HttpResponse: The response of the HttpRequest.
"""
raise NotImplementedError("Please Implement this method")
def convert_response(self, response, binary):
"""Converts the Response object of the HttpClient into an
HttpResponse object.
Args:
response (dynamic): The original response object.
Returns:
HttpResponse: The converted HttpResponse object.
"""
raise NotImplementedError("Please Implement this method")
def get(self, query_url,
headers={},
query_parameters={}):
"""Create a simple GET HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.GET,
query_url,
headers,
query_parameters,
None,
None)
def head(self, query_url,
headers={},
query_parameters={}):
"""Create a simple HEAD HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.HEAD,
query_url,
headers,
query_parameters,
None,
None)
def post(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple POST HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
parameters (dict, optional): Form or body parameters to be included
in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.POST,
query_url,
headers,
query_parameters,
parameters,
files)
def put(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple PUT HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
parameters (dict, optional): Form or body parameters to be included
in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.PUT,
query_url,
headers,
query_parameters,
parameters,
files)
def patch(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple PATCH HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
parameters (dict, optional): Form or body parameters to be included
in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.PATCH,
query_url,
headers,
query_parameters,
parameters,
files)
def delete(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple DELETE HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
parameters (dict, optional): Form or body parameters to be
included in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.DELETE,
query_url,
headers,
query_parameters,
parameters,
files) | APIMATIC-Caculator | /APIMATIC%20Caculator-1.0.tar.gz/APIMATIC Caculator-1.0/apimaticcalculator/http/http_client.py | http_client.py |
import platform
from apimaticcalculator.api_helper import APIHelper
from apimaticcalculator.exceptions.api_exception import APIException
class BaseController(object):
"""All controllers inherit from this base class.
Attributes:
config (Configuration): The HttpClient which a specific controller
instance will use. By default all the controller objects share
the same HttpClient. A user can use his own custom HttpClient
as well.
http_call_back (HttpCallBack): An object which holds call back
methods to be called before and after the execution of an HttpRequest.
global_headers (dict): The global headers of the API which are sent with
every request.
"""
def global_headers(self):
return {
'user-agent': self.get_user_agent()
}
def __init__(self, config, call_back=None):
self._config = config
self._http_call_back = call_back
@property
def config(self):
return self._config
@property
def http_call_back(self):
return self._http_call_back
def validate_parameters(self, **kwargs):
"""Validates required parameters of an endpoint.
Args:
kwargs (dict): A dictionary of the required parameters.
"""
for name, value in kwargs.items():
if value is None:
raise ValueError("Required parameter {} cannot be None.".format(name))
def execute_request(self, request, binary=False):
"""Executes an HttpRequest.
Args:
request (HttpRequest): The HttpRequest to execute.
binary (bool): A flag which should be set to True if
a binary response is expected.
Returns:
HttpResponse: The HttpResponse received.
"""
# Invoke the on before request HttpCallBack if specified
if self.http_call_back is not None:
self.http_call_back.on_before_request(request)
# Add global headers to request
request.headers = APIHelper.merge_dicts(self.global_headers(), request.headers)
# Invoke the API call to fetch the response.
func = self.config.http_client.execute_as_binary if binary else self.config.http_client.execute_as_string
response = func(request)
# Invoke the on after response HttpCallBack if specified
if self.http_call_back is not None:
self.http_call_back.on_after_response(response)
return response
def validate_response(self, response):
"""Validates an HTTP response by checking for global errors.
Args:
response (HttpResponse): The HttpResponse of the API call.
"""
if (response.status_code < 200) or (response.status_code > 208): # [200,208] = HTTP OK
raise APIException('HTTP response not OK.', response)
def get_user_agent(self):
user_agent = 'APIMATIC 3.0'
parameters = {
}
agent = APIHelper.append_url_with_template_parameters(user_agent, parameters)
        return agent.replace('  ', ' ')
from apimaticcalculator.api_helper import APIHelper
from apimaticcalculator.configuration import Server
from apimaticcalculator.controllers.base_controller import BaseController
class SimpleCalculatorController(BaseController):
"""A Controller to access Endpoints in the apimaticcalculator API."""
def __init__(self, config, call_back=None):
super(SimpleCalculatorController, self).__init__(config, call_back)
def get_calculate(self,
options=dict()):
"""Does a GET request to /{operation}.
Calculates the expression using the specified operation.
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
operation -- OperationTypeEnum -- The operator to apply on
the variables
x -- float -- The LHS value
y -- float -- The RHS value
Returns:
float: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/{operation}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'operation': {'value': options.get('operation', None), 'encode': True}
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'x': options.get('x', None),
'y': options.get('y', None)
}
_query_builder = APIHelper.append_url_with_query_parameters(
_query_builder,
_query_parameters
)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
_request = self.config.http_client.get(_query_url)
_response = self.execute_request(_request)
self.validate_response(_response)
decoded = float(_response.text)
return decoded | APIMATIC-Caculator | /APIMATIC%20Caculator-1.0.tar.gz/APIMATIC Caculator-1.0/apimaticcalculator/controllers/simple_calculator_controller.py | simple_calculator_controller.py |
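# Illustrative usage sketch with hypothetical values: the operation passed in the
# options dictionary must be one of the API's OperationTypeEnum values.
#
#   controller = SimpleCalculatorController(Configuration())
#   result = controller.get_calculate({'operation': 'SUM', 'x': 4, 'y': 5})
#   # result is the float parsed from the response body, e.g. 9.0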
import pandas as pd
import requests
import json
from datetime import datetime
import calendar
from typing import List, Tuple
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from urllib.parse import urlencode
def __save_data(data, name: str, format: str) -> None:
"""
    Save the data to a file in the specified format.
    This function takes a DataFrame, a file name and a format ('csv' or 'xlsx'),
    then saves the data in the specified format using to_csv or to_excel.
    Args:
        data (pd.DataFrame): The data to be saved.
        name (str): File name (without the extension).
        format (str): File format ('csv' or 'xlsx').
    Returns:
        None
    Example:
        >>> df = pd.DataFrame({'column1': [1, 2, 3], 'column2': [4, 5, 6]})
        >>> __save_data(df, 'my_data', 'csv')
"""
if format == 'csv':
data.to_csv(name + '.csv')
elif format == 'xlsx':
data.to_excel(name + '.xlsx')
else:
print('Formato no válido. Formatos válidos: csv y xlsx')
def __convert_measurements(measurements: list[str], mode="lower"):
"""
    Convert and correct measurement names according to the specified mode.
    This function takes a list of measurement names, optionally corrects some names using a dictionary of
    specific corrections, and then converts them to upper or lower case depending on the mode.
    Args:
        measurements (list[str]): List of measurement names.
        mode (str): Conversion mode ('lower' for lowercase, 'upper' for uppercase).
    Returns:
        list[str]: List of converted measurement names.
    Example:
        >>> measurements = ['temperatura2', 'HUMEDAD_2', 'NO2']
        >>> converted_measurements = __convert_measurements(measurements, 'upper')
"""
    # Dictionary of specific name corrections
    corrections = {
        "temperatura2": "temperatura_2",
        "temperatura_2": "temperatura2",
        "humedad2": "humedad_2",
        "humedad_2": "humedad2",
        "TEMPERATURA2": "TEMPERATURA_2",
        "TEMPERATURA_2": "TEMPERATURA2",
        "HUMEDAD2": "HUMEDAD_2",
        "HUMEDAD_2": "HUMEDAD2"
    }
new_measurements = []
for measurement in measurements:
        # Apply the specific corrections if needed
corrected_measurement = corrections.get(measurement, measurement)
        # Convert to upper or lower case depending on the mode
new_measurement = corrected_measurement.upper() if mode == 'upper' else corrected_measurement.lower()
new_measurements.append(new_measurement)
return new_measurements
def download_data(id_device: str, start_date: str, end_date: str, sample_rate: str, format: str = None, fields: str = None):
"""
    Download and process data from a device over a specified date range.
    This function downloads data from a device using the MakeSens API, processes the downloaded data and
    returns a DataFrame. If a format is given, it also saves the data to a file in that format.
    Args:
        id_device (str): ID of the device to download data from.
        start_date (str): Start date and time in 'YYYY-MM-DD HH:MM:SS' format.
        end_date (str): End date and time in 'YYYY-MM-DD HH:MM:SS' format.
        sample_rate (str): Sampling rate for the data ('m' for minutes, 'h' for hours, 'd' for days).
        format (str, optional): Format used to save the downloaded data ('csv' or 'xlsx'). Defaults to None.
        fields (str, optional): Comma-separated list of specific fields to download. Defaults to None (all fields).
    Returns:
        pd.DataFrame: DataFrame with the downloaded data.
    Example:
        >>> data = download_data('device123', '2023-01-01 00:00:00', '2023-01-02 00:00:00', 'h', 'csv', 'pm10_1')
"""
    # Convert the date strings to datetime objects
start_date_ = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')
end_date_ = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')
    # Convert datetime to Unix timestamps in milliseconds
start = int(calendar.timegm(start_date_.utctimetuple())) * 1000
end = int(calendar.timegm(end_date_.utctimetuple())) * 1000
    dat = []  # Accumulates the downloaded records
tmin = start
if fields is not None:
fields = fields.split(',')
fields = str(','.join(__convert_measurements(fields, mode='upper')))
while tmin < end:
params = {'min_ts': tmin,
'max_ts': end,
'agg': sample_rate}
if fields is not None:
params['fields'] = fields
encoded_params = urlencode(params)
url = f'https://api.makesens.co/device/{id_device}/data?{encoded_params}'
try:
rta = requests.get(url).content
d = json.loads(rta)
except Exception as e:
print(f"Error fetching or parsing data: {e}")
break
        # Exit the loop if there is no data or the timestamp has not advanced
if len(d) == 1 or tmin == int(d['date_range']['end']):
break
tmin = int(d['date_range']['end'])
dat.extend(d['data'])
if dat:
data = pd.DataFrame(dat)
data['ts'] = pd.to_datetime(data['ts'], unit='ms', utc=False)
        # Rename the variables to their usual names
new_columns = __convert_measurements(list(data.columns))
data.columns = new_columns
data.rename(columns={
"pm10_1_ae" : "pm10_1_AE",
"pm10_2_ae" : "pm10_2_AE",
"pm25_1_ae" : "pm25_1_AE",
"pm25_2_ae" : "pm25_2_AE",
"pm1_1_ae" : "pm1_1_AE",
"pm1_2_ae" : "pm1_2_AE",
}, inplace=True)
start_ = start_date.replace(':', '_')
end_ = end_date.replace(':', '_')
        name = id_device + '_' + start_ + '_' + end_ + '_' + sample_rate
if format is not None:
__save_data(data, name, format)
return data
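# Note on the timestamp handling above: the API expects Unix timestamps in
# milliseconds, hence the multiplication by 1000. For example, a UTC start date of
# 2023-01-01 00:00:00 gives calendar.timegm(...) == 1672531200 and therefore
# min_ts == 1672531200000.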
# def download_data(id_device:str,start_date:str,end_date:str, sample_rate:str,format:str = None, fields:str = None):
# start:int = int((datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S") - datetime(1970, 1, 1)).total_seconds())
# end:int = int((datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S") - datetime(1970, 1, 1)).total_seconds())
# dat:list = []
# tmin:int = start
# while tmin < end:
# if fields == None:
# url = f'https://api.makesens.co/ambiental/metricas/{id_device}/data?agg=1{sample_rate}&agg_type=mean&items=1000&max_ts={str(end * 1000)}&min_ts={str(tmin * 1000)}'
# else:
# url = f'https://api.makesens.co/ambiental/metricas/{id_device}/data?agg=1{sample_rate}&agg_type=mean&fields={fields}&items=1000&max_ts={str(end * 1000)}&min_ts={str(tmin * 1000)}'
# rta = requests.get(url).content
# d = json.loads(rta)
# try:
# if tmin == (d[-1]['ts']//1000) + 1:
# break
# dat = dat + d
# tmin = (d[-1]['ts']//1000) + 1
# except IndexError:
# break
# data = pd.DataFrame([i['val'] for i in dat], index=[datetime.utcfromtimestamp(i['ts']/1000).strftime('%Y-%m-%d %H:%M:%S') for i in dat])
# start_ = start_date.replace(':','_')
# end_ = end_date.replace(':','_')
# name = id_device + '_'+ start_ +'_' + end_ + '_ ' + sample_rate
# if format != None:
# __save_data(data,name,format)
# return data
# -------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------
def __gradient_plot(data, scale, y_label, sample_rate):
"""
    Create a gradient plot for time-series data.
    This function draws a gradient line plot that represents how the time-series data varies over the
    given period. It uses a colour scale and a colour bar whose colours depend on the intensity of
    the data.
    Args:
        data (pd.Series): Time series of data.
        scale (tuple): Value range for the colour scale.
        y_label (str): Y-axis label.
        sample_rate (str): Sampling rate of the time-series data.
    Returns:
        None
    Example:
        >>> data = pd.Series([10, 20, 30, 40, 50, 60])
        >>> __gradient_plot(data, (10, 60), 'PM2.5', '1T')
"""
    # Map the user-facing sample-rate codes to pandas frequency strings
    if sample_rate == 'm':
        sample_rate = '1T'
    elif sample_rate == 'w':
        sample_rate = '7d'
data.index = pd.DatetimeIndex(data.index)
a = pd.date_range(data.index[0], data.index[-1], freq=sample_rate)
s = []
for i in a:
if i in data.index:
s.append(data[i])
else:
s.append(np.nan)
dat = pd.DataFrame(index=a)
dat['PM'] = s
dat.index = dat.index.strftime("%Y-%m-%d %H:%M:%S")
colorlist = ["green", "yellow", 'Orange', "red", 'Purple', 'Brown']
newcmp = LinearSegmentedColormap.from_list('testCmap', colors=colorlist, N=256)
y_ = np.array(list(dat['PM']))
x_ = np.linspace(1, len(y_), len(y_))
x = np.linspace(1, len(y_), 10000)
y = np.interp(x, x_, y_)
points = np.array([x-1, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
fig, ax = plt.subplots(figsize=(15, 5))
norm = plt.Normalize(scale[0], scale[1])
lc = LineCollection(segments, cmap=newcmp, norm=norm)
lc.set_array(y)
lc.set_linewidth(1)
line = ax.add_collection(lc)
dat['PM'].plot(lw=0)
plt.colorbar(line, ax=ax)
ax.set_ylim(min(y)-10, max(y)+10)
    plt.ylabel(y_label + r' $\mu g / m^3$', fontsize=14)
plt.xlabel('Estampa temporal', fontsize=14)
plt.gcf().autofmt_xdate()
plt.show()
def gradient_pm10(id_device: str, start_date: str, end_date: str, sample_rate: str):
"""
    Download, process and visualize the PM10 data of a device as a gradient plot.
    This function downloads the PM10 data of a device for the specified period, processes it and draws a
    gradient plot using the '__gradient_plot' function. The scale and the sampling rate are configured as needed.
    Args:
    - id_device (str): ID of the device to download data from.
    - start_date (str): Start date and time in 'YYYY-MM-DD HH:MM:SS' format.
    - end_date (str): End date and time in 'YYYY-MM-DD HH:MM:SS' format.
    - sample_rate (str): Sampling rate ('m' for minutes, 'w' for weeks).
    Returns:
    - None
    Example:
    - gradient_pm10('mE1_00003', '2023-01-01 00:00:00', '2023-01-02 00:00:00', '1h')
"""
data = download_data(id_device, start_date, end_date, sample_rate, fields='pm10_1')
#data['ts'] = data.index
data = data.drop_duplicates(subset=['ts'])
data.index = data['ts']
__gradient_plot(data.pm10_1, (54, 255), 'PM10 ', sample_rate)
def gradient_pm2_5(id_device: str, start_date: str, end_date: str, sample_rate: str):
"""
    Download, process and visualize the PM2.5 data of a device as a gradient plot.
    This function downloads the PM2.5 data of a device for the specified period, processes it and draws a
    gradient plot using the '__gradient_plot' function. The scale and the sampling rate are configured as needed.
    Args:
        id_device (str): ID of the device to download data from.
        start_date (str): Start date and time in 'YYYY-MM-DD HH:MM:SS' format.
        end_date (str): End date and time in 'YYYY-MM-DD HH:MM:SS' format.
        sample_rate (str): Sampling rate ('m' for minutes, 'w' for weeks).
    Returns:
        None
    Example:
        >>> gradient_pm2_5('device123', '2023-01-01 00:00:00', '2023-01-02 00:00:00', '1h')
"""
data = download_data(id_device, start_date, end_date, sample_rate, fields='pm25_1')
#data['ts'] = data.index
data = data.drop_duplicates(subset=['ts'])
data.index = data['ts']
__gradient_plot(data.pm25_1, (12, 251), 'PM2.5 ', sample_rate)
# -------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------
def _heatmap_plot(data, scale, title):
"""
    Create a heatmap to visualize time-series data by hour and date.
    This function draws a heatmap that shows how the time-series data varies across the hours of the day and the dates.
    It uses a colour scale to highlight the variations in the data.
    Args:
        data (pd.Series): Time series of data.
        scale (tuple): Value range for the colour scale.
        title (str): Title of the heatmap.
    Returns:
        None
    Example:
        >>> data = pd.Series([...])
        >>> _heatmap_plot(data, (0, 100), 'PM2.5 concentration')
"""
colorlist = ["green", "yellow", 'Orange', "red", 'Purple', 'Brown']
newcmp = LinearSegmentedColormap.from_list('testCmap', colors=colorlist, N=256)
norm = plt.Normalize(scale[0], scale[1])
date = pd.date_range(data.index.date[0], data.index.date[-1]).date
hours = range(0, 24)
mapa = pd.DataFrame(columns=date, index=hours, dtype="float")
for i in range(0, len(date)):
dat = data[data.index.date == date[i]]
for j in range(0, len(dat)):
fila = dat.index.hour[j]
mapa[date[i]][fila] = dat[j]
plt.figure(figsize=(10, 8))
ax = sns.heatmap(mapa, cmap=newcmp, norm=norm)
plt.ylabel('Horas', fontsize=16)
plt.xlabel('Estampa temporal', fontsize=16)
    plt.title(title + r' $\mu g / m^3$', fontsize=16)
plt.show()
def heatmap_pm10(id_device: str, start_date: str, end_date: str):
"""
    Create a heatmap for the PM10 data of a device.
    This function downloads the PM10 data of a device over a specified date range and draws a heatmap
    to visualize how the data varies across the hours of the day and the dates.
    Args:
        id_device (str): ID of the device to download data from.
        start_date (str): Start date and time in 'YYYY-MM-DD HH:MM:SS' format.
        end_date (str): End date and time in 'YYYY-MM-DD HH:MM:SS' format.
    Returns:
        None
    Example:
        >>> heatmap_pm10('device123', '2023-01-01 00:00:00', '2023-01-02 00:00:00')
"""
data = download_data(id_device, start_date, end_date, '1H', fields='pm10_1')
data.index = pd.DatetimeIndex(data['ts'])
data = data.pm10_1
_heatmap_plot(data, (54, 255), 'PM10')
def heatmap_pm2_5(id_device: str, start_date: str, end_date: str):
"""
    Create a heatmap for the PM2.5 data of a device.
    This function downloads the PM2.5 data of a device over a specified date range and draws a heatmap
    to visualize how the data varies across the hours of the day and the dates.
    Args:
        id_device (str): ID of the device to download data from.
        start_date (str): Start date and time in 'YYYY-MM-DD HH:MM:SS' format.
        end_date (str): End date and time in 'YYYY-MM-DD HH:MM:SS' format.
    Returns:
        None
    Example:
        >>> heatmap_pm2_5('device123', '2023-01-01 00:00:00', '2023-01-02 00:00:00')
"""
data = download_data(id_device, start_date, end_date, '1H', fields='pm25_1')
data.index = pd.DatetimeIndex(data['ts'])
data = data.pm25_1
_heatmap_plot(data, (12, 251), 'PM2.5')
# -------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------
def weekly_profile(id_device: str, start_date: str, end_date: str, field: str):
"""
    Create a weekly profile for a specific field of a device.
    This function downloads the data of a specific field of a device over a date range and builds a weekly
    profile showing how the data varies across the days of the week and the hours of the day.
    Args:
        id_device (str): ID of the device to download data from.
        start_date (str): Start date and time in 'YYYY-MM-DD HH:MM:SS' format.
        end_date (str): End date and time in 'YYYY-MM-DD HH:MM:SS' format.
        field (str): Field for which the profile is built (e.g. 'PM10' or 'PM2.5').
    Returns:
        None
    Example:
        >>> weekly_profile('device123', '2023-01-01 00:00:00', '2023-01-07 23:59:59', 'PM10')
"""
    fields = {'PM10': {'variable': 'pm10_1', 'unidades': r'[$\mu g/m^3$ ]'}, 'PM2.5': {'variable': 'pm25_1', 'unidades': r'[$\mu g/m^3$ ]'},
              'CO2': {'ppm'}}
var = fields[field]['variable']
unidad = fields[field]['unidades']
data = download_data(id_device, start_date, end_date, '1H', fields=var)
data.index = pd.DatetimeIndex(data['ts'])
days = range(0, 7)
hours = range(0, 24)
data['day'] = [i.weekday() for i in data.index]
data['hour'] = [i.hour for i in data.index]
variable_mean = []
variable_std = []
for day in days:
for hour in hours:
variable = data[(data.day == day) & (data.hour == hour)][var]
variable_mean.append(variable.mean())
variable_std.append(variable.std())
a = min(np.array(variable_mean) - np.array(variable_std))
b = max(np.array(variable_mean) + np.array(variable_std))
x = [i for i in range(168)]
plt.figure(figsize=(18, 4))
plt.plot(x, np.array(variable_mean))
plt.fill_between(x, np.array(variable_mean) - np.array(variable_std), np.array(variable_mean) + np.array(variable_std), color='r', alpha=0.2)
plt.xticks(np.linspace(0, 162, 28), ['0', '6', '12', '18'] * 7)
plt.hlines(b + 5, 0, 167, color='k')
for i in np.linspace(0, 168, 8)[1:-1]:
plt.vlines(i, a, b + 15, color='k', ls='--', lw=1)
name_days = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes', 'Sábado', 'Domingo']
position_x = [9, 30, 55, 80, 103, 128, 150]
for i in range(0, len(name_days)):
plt.text(position_x[i], b + 8, name_days[i], fontsize=13)
plt.xlim(0, 167)
plt.ylim(a, b + 15)
plt.xlabel('Horas', fontsize=14)
plt.ylabel(f'{field} {unidad}', fontsize=14)
plt.show() | APIMakeSens | /APIMakeSens-1.3.5.tar.gz/APIMakeSens-1.3.5/MakeSens/MakeSens.py | MakeSens.py |
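# Note on the indexing used above: the weekly profile flattens the (day, hour)
# grid into a single axis of 7 * 24 = 168 points, so index 0 is Monday 00:00,
# index 23 is Monday 23:00, index 24 is Tuesday 00:00 and index 167 is Sunday 23:00.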
import re
import sys
import datetime
import calendar
import email.utils as eut
from time import mktime
import jsonpickle
import dateutil.parser
from requests.utils import quote
class APIHelper(object):
"""A Helper Class for various functions associated with API Calls.
This class contains static methods for operations that need to be
performed during API requests. All of the methods inside this class are
static methods, there is no need to ever initialise an instance of this
class.
"""
@staticmethod
def merge_dicts(dict1, dict2):
"""Merges two dictionaries into one as a shallow copy.
Args:
dict1 (dict): The first dictionary.
dict2 (dict): The second dictionary.
Returns:
dict: A dictionary containing key value pairs
from both the argument dictionaries. In the case
of a key conflict, values from dict2 are used
and those from dict1 are lost.
"""
temp = dict1.copy()
temp.update(dict2)
return temp
@staticmethod
def json_serialize(obj):
"""JSON Serialization of a given object.
Args:
obj (object): The object to serialize.
Returns:
str: The JSON serialized string of the object.
"""
if obj is None:
return None
# Resolve any Names if it's one of our objects that needs to have this called on
if isinstance(obj, list):
value = list()
for item in obj:
if hasattr(item, "_names"):
value.append(APIHelper.to_dictionary(item))
else:
value.append(item)
obj = value
else:
if hasattr(obj, "_names"):
obj = APIHelper.to_dictionary(obj)
return jsonpickle.encode(obj, False)
@staticmethod
def json_deserialize(json, unboxing_function=None, as_dict=False):
"""JSON Deserialization of a given string.
Args:
json (str): The JSON serialized string to deserialize.
Returns:
dict: A dictionary representing the data contained in the
JSON serialized string.
"""
if json is None:
return None
try:
decoded = jsonpickle.decode(json)
except ValueError:
return json
if unboxing_function is None:
return decoded
if as_dict:
return {k: unboxing_function(v) for k, v in decoded.items()}
elif isinstance(decoded, list):
return [unboxing_function(element) for element in decoded]
else:
return unboxing_function(decoded)
@staticmethod
def get_content_type(value):
"""Get content type header for oneof.
Args:
value: The value passed by the user.
Returns:
dict: A dictionary representing the data contained in the
JSON serialized string.
"""
if value is None:
return None
primitive = (int, str, bool, float)
if type(value) in primitive:
return 'text/plain; charset=utf-8'
else:
return 'application/json; charset=utf-8'
@staticmethod
def get_schema_path(path):
"""Return the Schema's path
Returns:
string : returns Correct schema path
"""
path = path.replace('\\models', '\\schemas').replace('/models', '/schemas').replace(".py", ".json")
return path
@staticmethod
def serialize_array(key, array, formatting="indexed", is_query=False):
"""Converts an array parameter to a list of key value tuples.
Args:
key (str): The name of the parameter.
array (list): The value of the parameter.
formatting (str): The type of key formatting expected.
is_query (bool): Decides if the parameters are for query or form.
Returns:
list: A list with key value tuples for the array elements.
"""
tuples = []
if sys.version_info[0] < 3:
serializable_types = (str, int, long, float, bool, datetime.date, APIHelper.CustomDate)
else:
serializable_types = (str, int, float, bool, datetime.date, APIHelper.CustomDate)
if isinstance(array[0], serializable_types):
if formatting == "unindexed":
tuples += [("{0}[]".format(key), element) for element in array]
elif formatting == "indexed":
tuples += [("{0}[{1}]".format(key, index), element) for index, element in enumerate(array)]
elif formatting == "plain":
tuples += [(key, element) for element in array]
elif is_query:
if formatting == "csv":
tuples += [(key, ",".join(str(x) for x in array))]
elif formatting == "psv":
tuples += [(key, "|".join(str(x) for x in array))]
elif formatting == "tsv":
tuples += [(key, "\t".join(str(x) for x in array))]
else:
raise ValueError("Invalid format provided.")
else:
tuples += [("{0}[{1}]".format(key, index), element) for index, element in enumerate(array)]
return tuples
@staticmethod
def append_url_with_template_parameters(url, parameters):
"""Replaces template parameters in the given url.
Args:
url (str): The query url string to replace the template parameters.
parameters (dict): The parameters to replace in the url.
Returns:
str: URL with replaced parameters.
"""
# Parameter validation
if url is None:
raise ValueError("URL is None.")
if parameters is None:
return url
# Iterate and replace parameters
for key in parameters:
value = parameters[key]['value']
encode = parameters[key]['encode']
replace_value = ''
# Load parameter value
if value is None:
replace_value = ''
elif isinstance(value, list):
replace_value = "/".join((quote(str(x), safe='') if encode else str(x)) for x in value)
else:
replace_value = quote(str(value), safe='') if encode else str(value)
url = url.replace('{{{0}}}'.format(key), str(replace_value))
return url
@staticmethod
def append_url_with_query_parameters(url,
parameters,
array_serialization="indexed"):
"""Adds query parameters to a URL.
Args:
url (str): The URL string.
parameters (dict): The query parameters to add to the URL.
array_serialization (str): The format of array parameter serialization.
Returns:
str: URL with added query parameters.
"""
# Parameter validation
if url is None:
raise ValueError("URL is None.")
if parameters is None:
return url
parameters = APIHelper.process_complex_types_parameters(parameters, array_serialization)
for index, value in enumerate(parameters):
key = value[0]
val = value[1]
            separator = '&' if '?' in url else '?'
            if val is not None:
                url += "{0}{1}={2}".format(separator, key, quote(str(val), safe=''))
return url
@staticmethod
def process_complex_types_parameters(query_parameters, array_serialization):
processed_params = []
for key, value in query_parameters.items():
processed_params.extend(
APIHelper.form_encode(value, key, array_serialization=array_serialization, is_query=True))
return processed_params
@staticmethod
def clean_url(url):
"""Validates and processes the given query Url to clean empty slashes.
Args:
url (str): The given query Url to process.
Returns:
str: Clean Url as string.
"""
# Ensure that the urls are absolute
regex = "^https?://[^/]+"
match = re.match(regex, url)
if match is None:
raise ValueError('Invalid Url format.')
protocol = match.group(0)
index = url.find('?')
query_url = url[len(protocol): index if index != -1 else None]
query_url = re.sub("//+", "/", query_url)
parameters = url[index:] if index != -1 else ""
return protocol + query_url + parameters
@staticmethod
def form_encode_parameters(form_parameters,
array_serialization="indexed"):
"""Form encodes a dictionary of form parameters
Args:
form_parameters (dictionary): The given dictionary which has
                at least one model to form encode.
array_serialization (str): The format of array parameter serialization.
Returns:
dict: A dictionary of form encoded properties of the model.
"""
encoded = []
for key, value in form_parameters.items():
encoded += APIHelper.form_encode(value, key, array_serialization)
return encoded
@staticmethod
def form_encode(obj,
instance_name,
array_serialization="indexed", is_query=False):
"""Encodes a model in a form-encoded manner such as person[Name]
Args:
obj (object): The given Object to form encode.
instance_name (string): The base name to appear before each entry
for this object.
array_serialization (string): The format of array parameter serialization.
is_query (bool): Decides if the parameters are for query or form.
Returns:
dict: A dictionary of form encoded properties of the model.
"""
retval = []
        # If we received an object, resolve its field names.
if hasattr(obj, "_names"):
obj = APIHelper.to_dictionary(obj)
if obj is None:
return []
elif isinstance(obj, list):
for element in APIHelper.serialize_array(instance_name, obj, array_serialization, is_query):
retval += APIHelper.form_encode(element[1], element[0], array_serialization, is_query)
elif isinstance(obj, dict):
for item in obj:
retval += APIHelper.form_encode(obj[item], instance_name + "[" + item + "]", array_serialization, is_query)
else:
retval.append((instance_name, obj))
return retval
@staticmethod
def to_dictionary(obj):
"""Creates a dictionary representation of a class instance. The
keys are taken from the API description and may differ from language
specific variable names of properties.
Args:
obj: The object to be converted into a dictionary.
Returns:
dictionary: A dictionary form of the model with properties in
their API formats.
"""
dictionary = dict()
# Loop through all properties in this model
for name in obj._names:
value = getattr(obj, name)
if isinstance(value, list):
# Loop through each item
dictionary[obj._names[name]] = list()
for item in value:
dictionary[obj._names[name]].append(APIHelper.to_dictionary(item) if hasattr(item, "_names") else item)
elif isinstance(value, dict):
# Loop through each item
dictionary[obj._names[name]] = dict()
for key in value:
dictionary[obj._names[name]][key] = APIHelper.to_dictionary(value[key]) if hasattr(value[key], "_names") else value[key]
else:
dictionary[obj._names[name]] = APIHelper.to_dictionary(value) if hasattr(value, "_names") else value
# Return the result
return dictionary
@staticmethod
def when_defined(func, value):
return func(value) if value else None
class CustomDate(object):
""" A base class for wrapper classes of datetime.
This class contains methods which help in
appropriate serialization of datetime objects.
"""
def __init__(self, dtime, value=None):
self.datetime = dtime
if not value:
self.value = self.from_datetime(dtime)
else:
self.value = value
def __repr__(self):
return str(self.value)
def __getstate__(self):
return self.value
def __setstate__(self, state):
pass
class HttpDateTime(CustomDate):
""" A wrapper class for datetime to support HTTP date format."""
@classmethod
def from_datetime(cls, date_time):
return eut.formatdate(timeval=mktime(date_time.timetuple()),
localtime=False, usegmt=True)
@classmethod
def from_value(cls, value):
dtime = datetime.datetime.fromtimestamp(eut.mktime_tz(eut.parsedate_tz(value)))
return cls(dtime, value)
class UnixDateTime(CustomDate):
""" A wrapper class for datetime to support Unix date format."""
@classmethod
def from_datetime(cls, date_time):
return calendar.timegm(date_time.utctimetuple())
@classmethod
def from_value(cls, value):
dtime = datetime.datetime.utcfromtimestamp(float(value))
return cls(dtime, float(value))
class RFC3339DateTime(CustomDate):
""" A wrapper class for datetime to support Rfc 3339 format."""
@classmethod
def from_datetime(cls, date_time):
return date_time.isoformat()
@classmethod
def from_value(cls, value):
dtime = dateutil.parser.parse(value)
return cls(dtime, value) | APIMaticSampleCalcPyPI | /APIMaticSampleCalcPyPI-1.2-py3-none-any.whl/calculatormharis/api_helper.py | api_helper.py |
from enum import Enum
from calculatormharis.http.requests_client import RequestsClient
class Environment(Enum):
"""An enum for SDK environments"""
PRODUCTION = 0
class Server(Enum):
"""An enum for API servers"""
DEFAULT = 0
class Configuration(object):
"""A class used for configuring the SDK by a user.
"""
@property
def http_client(self):
return self._http_client
@property
def http_client_instance(self):
return self._http_client_instance
@property
def override_http_client_configuration(self):
return self._override_http_client_configuration
@property
def http_call_back(self):
return self._http_call_back
@property
def timeout(self):
return self._timeout
@property
def max_retries(self):
return self._max_retries
@property
def backoff_factor(self):
return self._backoff_factor
@property
def retry_statuses(self):
return self._retry_statuses
@property
def retry_methods(self):
return self._retry_methods
@property
def environment(self):
return self._environment
def __init__(
self, http_client_instance=None,
override_http_client_configuration=False, http_call_back=None,
timeout=60, max_retries=0, backoff_factor=2,
retry_statuses=[408, 413, 429, 500, 502, 503, 504, 521, 522, 524],
retry_methods=['GET', 'PUT'], environment=Environment.PRODUCTION
):
# The Http Client passed from the sdk user for making requests
self._http_client_instance = http_client_instance
# The value which determines to override properties of the passed Http Client from the sdk user
self._override_http_client_configuration = override_http_client_configuration
# The callback value that is invoked before and after an HTTP call is made to an endpoint
self._http_call_back = http_call_back
# The value to use for connection timeout
self._timeout = timeout
# The number of times to retry an endpoint call if it fails
self._max_retries = max_retries
# A backoff factor to apply between attempts after the second try.
# urllib3 will sleep for:
# `{backoff factor} * (2 ** ({number of total retries} - 1))`
self._backoff_factor = backoff_factor
# The http statuses on which retry is to be done
self._retry_statuses = retry_statuses
# The http methods on which retry is to be done
self._retry_methods = retry_methods
# Current API environment
self._environment = environment
# The Http Client to use for making requests.
self._http_client = self.create_http_client()
def clone_with(self, http_client_instance=None,
override_http_client_configuration=None, http_call_back=None,
timeout=None, max_retries=None, backoff_factor=None,
retry_statuses=None, retry_methods=None, environment=None):
http_client_instance = http_client_instance or self.http_client_instance
override_http_client_configuration = override_http_client_configuration or self.override_http_client_configuration
http_call_back = http_call_back or self.http_call_back
timeout = timeout or self.timeout
max_retries = max_retries or self.max_retries
backoff_factor = backoff_factor or self.backoff_factor
retry_statuses = retry_statuses or self.retry_statuses
retry_methods = retry_methods or self.retry_methods
environment = environment or self.environment
return Configuration(
http_client_instance=http_client_instance,
override_http_client_configuration=override_http_client_configuration,
http_call_back=http_call_back, timeout=timeout,
max_retries=max_retries, backoff_factor=backoff_factor,
retry_statuses=retry_statuses, retry_methods=retry_methods,
environment=environment
)
def create_http_client(self):
return RequestsClient(
timeout=self.timeout, max_retries=self.max_retries,
backoff_factor=self.backoff_factor, retry_statuses=self.retry_statuses,
retry_methods=self.retry_methods,
http_client_instance=self.http_client_instance,
override_http_client_configuration=self.override_http_client_configuration
)
# All the environments the SDK can run in
environments = {
Environment.PRODUCTION: {
Server.DEFAULT: 'http://examples.apimatic.io/apps/calculator'
}
}
def get_base_uri(self, server=Server.DEFAULT):
"""Generates the appropriate base URI for the environment and the
server.
Args:
server (Configuration.Server): The server enum for which the base
URI is required.
Returns:
String: The base URI.
"""
return self.environments[self.environment][server] | APIMaticSampleCalcPyPI | /APIMaticSampleCalcPyPI-1.2-py3-none-any.whl/calculatormharis/configuration.py | configuration.py |
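# Usage sketch (illustrative, not part of the generated configuration.py above):
# constructing the Configuration defined above and deriving a modified copy.
# The parameter values are assumptions chosen for the example.
from calculatormharis.configuration import Configuration, Server

config = Configuration(timeout=30, max_retries=3, backoff_factor=2)
# clone_with() returns a new Configuration, overriding only the fields given
slow_config = config.clone_with(timeout=120, retry_statuses=[429, 503])
# get_base_uri() resolves the server URL for the configured environment
print(slow_config.get_base_uri(Server.DEFAULT))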
from cachecontrol import CacheControl
from requests import session
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from calculatormharis.http.http_client import HttpClient
from calculatormharis.http.http_method_enum import HttpMethodEnum
from calculatormharis.http.http_response import HttpResponse
class RequestsClient(HttpClient):
"""An implementation of HttpClient that uses Requests as its HTTP Client
Attributes:
timeout (int): The default timeout for all API requests.
"""
def __init__(self,
timeout=60,
cache=False,
max_retries=None,
backoff_factor=None,
retry_statuses=None,
retry_methods=None,
verify=True,
http_client_instance=None,
override_http_client_configuration=False):
"""The constructor.
Args:
timeout (float): The default global timeout(seconds).
"""
        if http_client_instance is None:
            self.create_default_http_client(timeout, cache, max_retries,
                                            backoff_factor, retry_statuses,
                                            retry_methods, verify)
else:
            if override_http_client_configuration:
http_client_instance.timeout = timeout
http_client_instance.session.verify = verify
adapters = http_client_instance.session.adapters
for adapter in adapters.values():
adapter.max_retries.total = max_retries
adapter.max_retries.backoff_factor = backoff_factor
adapter.max_retries.status_forcelist = retry_statuses
adapter.max_retries.allowed_methods = retry_methods
self.timeout = http_client_instance.timeout
self.session = http_client_instance.session
    def create_default_http_client(self,
                                   timeout=60,
                                   cache=False,
                                   max_retries=None,
                                   backoff_factor=None,
                                   retry_statuses=None,
                                   retry_methods=None,
                                   verify=True):
self.timeout = timeout
self.session = session()
retries = Retry(total=max_retries, backoff_factor=backoff_factor,
status_forcelist=retry_statuses, allowed_methods=retry_methods)
self.session.mount('http://', HTTPAdapter(max_retries=retries))
self.session.mount('https://', HTTPAdapter(max_retries=retries))
if cache:
self.session = CacheControl(self.session)
self.session.verify = verify
def force_retries(self, request, to_retry=None):
"""Reset retries according to each request
Args:
request (HttpRequest): The given HttpRequest to execute.
to_retry (boolean): whether to retry on a particular request
"""
adapters = self.session.adapters
if to_retry is False:
for adapter in adapters.values():
adapter.max_retries = False
elif to_retry is True:
for adapter in adapters.values():
adapter.max_retries.allowed_methods = [request.http_method]
def execute_as_string(self, request, to_retry=None):
"""Execute a given HttpRequest to get a string response back
Args:
request (HttpRequest): The given HttpRequest to execute.
to_retry (boolean): whether to retry on a particular request
Returns:
HttpResponse: The response of the HttpRequest.
"""
old_adapters = self.session.adapters
self.force_retries(request, to_retry)
response = self.session.request(
HttpMethodEnum.to_string(request.http_method),
request.query_url,
headers=request.headers,
params=request.query_parameters,
data=request.parameters,
files=request.files,
timeout=self.timeout
)
self.session.adapters = old_adapters
return self.convert_response(response, False, request)
def execute_as_binary(self, request, to_retry=None):
"""Execute a given HttpRequest to get a binary response back
Args:
request (HttpRequest): The given HttpRequest to execute.
to_retry (boolean): whether to retry on a particular request
Returns:
HttpResponse: The response of the HttpRequest.
"""
old_adapters = self.session.adapters
self.force_retries(request, to_retry)
response = self.session.request(
HttpMethodEnum.to_string(request.http_method),
request.query_url,
headers=request.headers,
params=request.query_parameters,
data=request.parameters,
files=request.files,
timeout=self.timeout
)
self.session.adapters = old_adapters
return self.convert_response(response, True, request)
def convert_response(self, response, binary, http_request):
"""Converts the Response object of the HttpClient into an
HttpResponse object.
Args:
response (dynamic): The original response object.
http_request (HttpRequest): The original HttpRequest object.
Returns:
HttpResponse: The converted HttpResponse object.
"""
if binary:
return HttpResponse(
response.status_code,
response.reason,
response.headers,
response.content,
http_request
)
else:
return HttpResponse(
response.status_code,
response.reason,
response.headers,
response.text,
http_request
) | APIMaticSampleCalcPyPI | /APIMaticSampleCalcPyPI-1.2-py3-none-any.whl/calculatormharis/http/requests_client.py | requests_client.py |
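# Usage sketch (illustrative): RequestsClient is normally created for you by
# Configuration.create_http_client(), but it can also be instantiated directly.
# The retry settings and URL below are example assumptions.
from calculatormharis.http.requests_client import RequestsClient

client = RequestsClient(
    timeout=30,
    max_retries=2,
    backoff_factor=1,
    retry_statuses=[429, 500, 502, 503, 504],
    retry_methods=['GET'],
)
# Build an HttpRequest with the inherited HttpClient helpers, then execute it
request = client.get('http://examples.apimatic.io/apps/calculator/SUM?x=4&y=5')
response = client.execute_as_string(request)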
from calculatormharis.api_helper import APIHelper
class HttpRequest(object):
"""Information about an HTTP Request including its method, headers,
parameters, URL, and Basic Auth details
Attributes:
http_method (HttpMethodEnum): The HTTP Method that this request should
perform when called.
headers (dict): A dictionary of headers (key : value) that should be
sent along with the request.
query_url (string): The URL that the request should be sent to.
parameters (dict): A dictionary of parameters that are to be sent along
with the request in the form body of the request
"""
def __init__(self,
http_method,
query_url,
headers=None,
query_parameters=None,
parameters=None,
files=None):
"""Constructor for the HttpRequest class
Args:
http_method (HttpMethodEnum): The HTTP Method.
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
parameters (dict, optional): Form or body parameters to be included
in the body.
files (dict, optional): Files to be sent with the request.
"""
self.http_method = http_method
self.query_url = query_url
self.headers = headers
self.query_parameters = query_parameters
self.parameters = parameters
self.files = files
def add_header(self, name, value):
""" Add a header to the HttpRequest.
Args:
name (string): The name of the header.
value (string): The value of the header.
"""
self.headers[name] = value
def add_parameter(self, name, value):
""" Add a parameter to the HttpRequest.
Args:
name (string): The name of the parameter.
value (string): The value of the parameter.
"""
self.parameters[name] = value
def add_query_parameter(self, name, value):
""" Add a query parameter to the HttpRequest.
Args:
name (string): The name of the query parameter.
value (string): The value of the query parameter.
"""
self.query_url = APIHelper.append_url_with_query_parameters(
self.query_url,
{name: value}
)
self.query_url = APIHelper.clean_url(self.query_url) | APIMaticSampleCalcPyPI | /APIMaticSampleCalcPyPI-1.2-py3-none-any.whl/calculatormharis/http/http_request.py | http_request.py |
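# Usage sketch (illustrative): building an HttpRequest by hand and enriching it.
# In the SDK these objects are normally produced by the HttpClient helper
# methods (get/post/...), so this only demonstrates the mutators above.
from calculatormharis.http.http_method_enum import HttpMethodEnum
from calculatormharis.http.http_request import HttpRequest

request = HttpRequest(HttpMethodEnum.GET,
                      'http://examples.apimatic.io/apps/calculator/SUM',
                      headers={},
                      query_parameters={})
request.add_header('Accept', 'text/plain')
# add_query_parameter() appends to query_url via APIHelper and re-cleans the URL
request.add_query_parameter('x', 4)
request.add_query_parameter('y', 5)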
from calculatormharis.http.http_method_enum import HttpMethodEnum
from calculatormharis.http.http_request import HttpRequest
class HttpClient(object):
"""An interface for the methods that an HTTP Client must implement
This class should not be instantiated but should be used as a base class
for HTTP Client classes.
"""
def execute_as_string(self, request):
"""Execute a given HttpRequest to get a string response back
Args:
request (HttpRequest): The given HttpRequest to execute.
Returns:
HttpResponse: The response of the HttpRequest.
"""
raise NotImplementedError("Please Implement this method")
def execute_as_binary(self, request):
"""Execute a given HttpRequest to get a binary response back
Args:
request (HttpRequest): The given HttpRequest to execute.
Returns:
HttpResponse: The response of the HttpRequest.
"""
raise NotImplementedError("Please Implement this method")
def convert_response(self, response, binary):
"""Converts the Response object of the HttpClient into an
HttpResponse object.
Args:
response (dynamic): The original response object.
Returns:
HttpResponse: The converted HttpResponse object.
"""
raise NotImplementedError("Please Implement this method")
def get(self, query_url,
headers={},
query_parameters={}):
"""Create a simple GET HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.GET,
query_url,
headers,
query_parameters,
None,
None)
def head(self, query_url,
headers={},
query_parameters={}):
"""Create a simple HEAD HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.HEAD,
query_url,
headers,
query_parameters,
None,
None)
def post(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple POST HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
parameters (dict, optional): Form or body parameters to be included
in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.POST,
query_url,
headers,
query_parameters,
parameters,
files)
def put(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple PUT HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
parameters (dict, optional): Form or body parameters to be included
in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.PUT,
query_url,
headers,
query_parameters,
parameters,
files)
def patch(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple PATCH HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
parameters (dict, optional): Form or body parameters to be included
in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.PATCH,
query_url,
headers,
query_parameters,
parameters,
files)
def delete(self, query_url,
headers={},
query_parameters={},
parameters={},
files={}):
"""Create a simple DELETE HttpRequest object for the given parameters
Args:
query_url (string): The URL to send the request to.
headers (dict, optional): The headers for the HTTP Request.
query_parameters (dict, optional): Query parameters to add in the
URL.
parameters (dict, optional): Form or body parameters to be
included in the body.
files (dict, optional): Files to be sent with the request.
Returns:
            HttpRequest: The generated HttpRequest for the given parameters.
"""
return HttpRequest(HttpMethodEnum.DELETE,
query_url,
headers,
query_parameters,
parameters,
files) | APIMaticSampleCalcPyPI | /APIMaticSampleCalcPyPI-1.2-py3-none-any.whl/calculatormharis/http/http_client.py | http_client.py |
import platform
from calculatormharis.api_helper import APIHelper
from calculatormharis.exceptions.api_exception import APIException
class BaseController(object):
"""All controllers inherit from this base class.
Attributes:
config (Configuration): The HttpClient which a specific controller
instance will use. By default all the controller objects share
the same HttpClient. A user can use his own custom HttpClient
as well.
http_call_back (HttpCallBack): An object which holds call back
methods to be called before and after the execution of an HttpRequest.
global_headers (dict): The global headers of the API which are sent with
every request.
"""
def global_headers(self):
return {
'user-agent': self.get_user_agent()
}
def __init__(self, config):
self._config = config
self._http_call_back = config.http_call_back
@property
def config(self):
return self._config
@property
def http_call_back(self):
return self._http_call_back
def validate_parameters(self, **kwargs):
"""Validates required parameters of an endpoint.
Args:
kwargs (dict): A dictionary of the required parameters.
"""
for name, value in kwargs.items():
if value is None:
raise ValueError("Required parameter {} cannot be None.".format(name))
def execute_request(self, request, binary=False, to_retry=None):
"""Executes an HttpRequest.
Args:
request (HttpRequest): The HttpRequest to execute.
binary (bool): A flag which should be set to True if
a binary response is expected.
to_retry (bool): whether to retry on a particular request
Returns:
HttpResponse: The HttpResponse received.
"""
# Invoke the on before request HttpCallBack if specified
if self.http_call_back is not None:
self.http_call_back.on_before_request(request)
# Add global headers to request
prepared_headers = {key: str(value) for key, value in request.headers.items()}
request.headers = APIHelper.merge_dicts(self.global_headers(), prepared_headers)
# Invoke the API call to fetch the response.
func = self.config.http_client.execute_as_binary if binary else self.config.http_client.execute_as_string
response = func(request, to_retry=to_retry)
# Invoke the on after response HttpCallBack if specified
if self.http_call_back is not None:
self.http_call_back.on_after_response(response)
return response
def validate_response(self, response):
"""Validates an HTTP response by checking for global errors.
Args:
response (HttpResponse): The HttpResponse of the API call.
"""
if (response.status_code < 200) or (response.status_code > 208): # [200,208] = HTTP OK
raise APIException('HTTP response not OK.', response)
def get_user_agent(self):
user_agent = 'APIMATIC 3.0'
parameters = {
}
agent = APIHelper.append_url_with_template_parameters(user_agent, parameters)
return agent.replace(' ', ' ') | APIMaticSampleCalcPyPI | /APIMaticSampleCalcPyPI-1.2-py3-none-any.whl/calculatormharis/controllers/base_controller.py | base_controller.py |
from calculatormharis.api_helper import APIHelper
from calculatormharis.configuration import Server
from calculatormharis.controllers.base_controller import BaseController
class SimpleCalculatorController(BaseController):
"""A Controller to access Endpoints in the calculatormharis API."""
def __init__(self, config):
super(SimpleCalculatorController, self).__init__(config)
def calculate(self,
operation,
x,
y):
"""Does a GET request to /{operation}.
Calculates the expression using the specified operation.
Args:
operation (OperationTypeEnum): The operator to apply on the
variables.
x (float): The LHS value.
y (float): The RHS value.
Returns:
float: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/{operation}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'operation': {'value': operation, 'encode': True}
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'x': x,
'y': y
}
_query_builder = APIHelper.append_url_with_query_parameters(
_query_builder,
_query_parameters
)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
_request = self.config.http_client.get(_query_url)
_response = self.execute_request(_request)
self.validate_response(_response)
decoded = float(_response.text)
return decoded
def w(self):
"""Does a GET request to /W.
TODO: type endpoint description here.
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/W'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
_request = self.config.http_client.get(_query_url)
_response = self.execute_request(_request)
self.validate_response(_response)
def y(self):
"""Does a GET request to /Y.
TODO: type endpoint description here.
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/Y'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
_request = self.config.http_client.get(_query_url)
_response = self.execute_request(_request)
self.validate_response(_response)
def a(self):
"""Does a GET request to /A.
TODO: type endpoint description here.
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/A'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
_request = self.config.http_client.get(_query_url)
_response = self.execute_request(_request)
self.validate_response(_response) | APIMaticSampleCalcPyPI | /APIMaticSampleCalcPyPI-1.2-py3-none-any.whl/calculatormharis/controllers/simple_calculator_controller.py | simple_calculator_controller.py |
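# Usage sketch (illustrative): invoking the calculate endpoint through the
# controller above with a default Configuration. 'SUM' stands in for an
# OperationTypeEnum value and the expected result assumes the public sample
# service is reachable.
from calculatormharis.configuration import Configuration
from calculatormharis.controllers.simple_calculator_controller import SimpleCalculatorController

controller = SimpleCalculatorController(Configuration())
result = controller.calculate('SUM', 4, 5)  # GET /SUM?x=4&y=5
print(result)  # expected 9.0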
<div style="text-align:center"><img width="300px" src="doc/jeitto.svg" /></div>
# APIQrCode
SDK QrCode Cielo
## Requirements:
- Python 3.x
## Usage:
- Installation:
``pip install APIQrCode``
- Example:
```python
from APIQrCode.qrcode_cielo import QrCodeCielo
qrcode_cielo = QrCodeCielo(client_id='your client id', client_secret='your client secret', sandbox=True)
# get access token data
access_token_data = qrcode_cielo.get_access_token()
access_token = access_token_data.get('access_token')
print(f'Access token data : {access_token_data}')
# parse qrcode
parse_qrcode = qrcode_cielo.parse_qrcode(access_token=access_token, qrcode='your qrcode')
print(f'Parse QRCode: {parse_qrcode}')
# get public key data
public_key_data = qrcode_cielo.get_public_key(access_token=access_token)
print(f'Public Key: {public_key_data}')
# process payment
public_key = public_key_data.get('public_key')
public_key_id = public_key_data.get('key_id')
card_data = {
'card_number': '5496228363201473',
'card_cvv': '128',
'card_holder_name': 'JOAO DA SILVA',
'card_expiration_date': '0225'
}
data = {
'key_id': public_key_id,
'payee_document': '78362896051',
'qrcode': 'qrcode',
'card_data': card_data,
'public_key': public_key
}
payment_card = qrcode_cielo.payment_card(access_token=access_token, data=data)
print(f'Payment card: {payment_card}')
```
- Full example at: ```/example/qr_code_cielo.py``` | APIQrCode | /APIQrCode-1.0.6.tar.gz/APIQrCode-1.0.6/README.md | README.md |
**APImetrics-Python-Client**
Command line-callable Python library that makes it easier to call APImetrics' APIs.
For use with your APImetrics monitoring service. Please sign up at http://client.apimetrics.io and create an API key at https://client.apimetrics.io/settings/api-key
**Installation**
Create a settings file at ``/etc/APImetrics`` or ``~/.APImetrics`` or locally (specify it with the ``-cfg`` flag)
Use command ``apimetrics -a YOUR_API_KEY`` to save the key.
**Command-line usage**::
usage: apimetrics [-h] [--apimetrics APIMETRICS] [--config CONFIG]
[--simulate]
{auth,call,deployment,report,token,workflow,alert,notification}
...
positional arguments:
{auth,call,deployment,report,token,workflow,alert,notification}
sub-command help
auth auth help
call call help
deployment deployment help
report report help
token token help
workflow workflow help
alert alert help
notification notification help
optional arguments:
-h, --help show this help message and exit
apimetrics:
APImetrics settings
--apimetrics APIMETRICS, -a APIMETRICS
Set the APImetrics key to use
--config CONFIG, -cfg CONFIG
Set the config file to use
--simulate, -s Simulate - don't call the APImetrics API
**APImetrics module**
You may also write your own scripts - look in the ``apimetrics/scripts`` folder for example code.
The example may also be called from the command line, e.g.:
``python -m apimetrics.scripts.delete_deployments --name "^z "``
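A minimal scripting sketch (the API key below is a placeholder; it assumes the package is installed and a key has been created)::

    from apimetrics.api import APImetricsAPI

    api = APImetricsAPI(apimetrics_key='YOUR_API_KEY')
    for call in api.list_all_calls()['results']:
        print(call['id'], call['meta']['name'])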
This version was tested with Python 2.7.6 and 3.5.2
| APImetrics | /APImetrics-0.2.2.tar.gz/APImetrics-0.2.2/README.rst | README.rst |
from __future__ import print_function
import logging
import json
import requests
from six.moves import input
from apimetrics.errors import APImetricsError
#https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning
requests.packages.urllib3.disable_warnings()
def _get_list_method(obj_name, search_term=None):
def list_obj(self, **kwargs):
url = '{url}/{obj_name}/'
if search_term:
assert search_term in kwargs
url += '{}/{}/'.format(search_term, kwargs[search_term])
del kwargs[search_term]
if not self.simulate:
full_url = url.format(url=self.api_base_url, obj_name=obj_name)
logging.debug('GET %s', full_url)
resp = requests.get(
full_url,
headers=self.headers,
params=kwargs)
return self.handle_resp(resp, 200)
return {'results': [], 'meta': {'more': False, 'next_cursor': None}}
return list_obj
def _get_list_all_method(obj_name, search_term=None):
list_fn = _get_list_method(obj_name, search_term)
def list_all_obj(self, **kwargs):
output = {'results':[], 'meta': {'more': False, 'next_cursor': None}}
more = True
cursor = None
while more:
resp = list_fn(self, cursor=cursor, **kwargs)
more = resp['meta']['more']
cursor = resp['meta']['next_cursor']
output['results'].extend(resp['results'])
return output
return list_all_obj
def _get_object_method(obj_name):
def get_obj(self, obj_id, **kwargs):
url = '{url}/{obj_name}/{obj_id}/'
full_url = url.format(url=self.api_base_url, obj_name=obj_name, obj_id=obj_id)
if not self.simulate:
logging.debug('GET %s', full_url)
resp = requests.get(
full_url,
headers=self.headers,
params=kwargs)
return self.handle_resp(resp, 200)
return {'id': obj_id}
return get_obj
def _delete_object_method(obj_name):
def del_obj(self, obj_id, **kwargs):
url = '{url}/{obj_name}/{obj_id}/'
if not self.simulate:
full_url = url.format(url=self.api_base_url, obj_name=obj_name, obj_id=obj_id)
logging.debug('DELETE %s', full_url)
resp = requests.delete(
full_url,
headers=self.headers,
params=kwargs)
return self.handle_resp(resp, 200)
return {'id': obj_id}
return del_obj
def _create_object_method(obj_name):
def create_obj(self, obj, **kwargs):
url = '{url}/{obj_name}/'
full_url = url.format(url=self.api_base_url, obj_name=obj_name)
logging.debug('PUT %s', full_url)
if not self.simulate:
resp = requests.put(
full_url,
headers=self.post_headers,
data=json.dumps(obj),
params=kwargs)
return self.handle_resp(resp, 201)
obj['id'] = 'DUMMY'
return obj
return create_obj
def _update_object_method(obj_name):
def update_obj(self, obj_id, obj, **kwargs):
url = '{url}/{obj_name}/{obj_id}/'
full_url = url.format(url=self.api_base_url, obj_name=obj_name, obj_id=obj_id)
if not self.simulate:
logging.debug('POST %s', full_url)
resp = requests.post(
full_url,
headers=self.post_headers,
data=json.dumps(obj),
params=kwargs)
return self.handle_resp(resp, 200)
return obj
return update_obj
class APImetricsAPI(object):
API_BASE_URL = "https://client.apimetrics.io/api/2"
@property
def headers(self):
return {
'Authorization': 'Bearer {token}'.format(token=self.apimetrics_key)
}
@property
def post_headers(self):
return {
'Authorization': 'Bearer {token}'.format(token=self.apimetrics_key),
'Content-Type': 'application/json'
}
def __init__(self, apimetrics_key=None, always_use_existing=False, always_create_new=False, simulate=False, api_base_url=None): # pylint: disable=R0913
self.always_use_existing = always_use_existing
self.always_create_new = always_create_new
self.apimetrics_key = apimetrics_key if apimetrics_key else None
self.simulate = simulate
self.api_base_url = api_base_url if api_base_url else self.API_BASE_URL
if not self.apimetrics_key:
            raise APImetricsError("Missing APImetrics API key - please generate a key at https://client.apimetrics.io/settings/api-key and use the -a flag to store it.")
list_auth = _get_list_method('auth')
list_auth_by_domain = _get_list_method('auth', 'domain')
list_calls = _get_list_method('calls')
list_calls_by_auth = _get_list_method('calls', 'auth')
list_deployments = _get_list_method('deployments')
list_deployments_by_call = _get_list_method('deployments', 'call')
list_deployments_by_workflow = _get_list_method('deployments', 'workflow')
list_reports = _get_list_method('reports')
list_tokens = _get_list_method('tokens')
list_tokens_by_auth = _get_list_method('tokens', 'auth')
list_workflows = _get_list_method('workflows')
list_all_auth = _get_list_all_method('auth')
list_all_auth_by_domain = _get_list_all_method('auth', 'domain')
list_all_calls = _get_list_all_method('calls')
list_all_calls_by_auth = _get_list_all_method('calls', 'auth')
list_all_deployments = _get_list_all_method('deployments')
list_all_deployments_by_call = _get_list_all_method('deployments', 'call')
list_all_deployments_by_workflow = _get_list_all_method('deployments', 'workflow') # pylint: disable=C0103
list_all_reports = _get_list_all_method('reports')
list_all_tokens = _get_list_all_method('tokens')
list_all_tokens_by_auth = _get_list_all_method('tokens', 'auth')
list_all_workflows = _get_list_all_method('workflows')
get_auth = _get_object_method('auth')
get_call = _get_object_method('calls')
get_deployment = _get_object_method('deployments')
get_report = _get_object_method('reports')
get_token = _get_object_method('tokens')
get_workflow = _get_object_method('workflows')
create_auth = _create_object_method('auth')
create_call = _create_object_method('calls')
create_deployment = _create_object_method('deployments')
create_report = _create_object_method('reports')
create_token = _create_object_method('tokens')
create_workflow = _create_object_method('workflows')
update_auth = _update_object_method('auth')
update_call = _update_object_method('calls')
update_deployment = _update_object_method('deployments')
update_report = _update_object_method('reports')
update_token = _update_object_method('tokens')
update_workflow = _update_object_method('workflows')
delete_auth = _delete_object_method('auth')
delete_call = _delete_object_method('calls')
delete_deployment = _delete_object_method('deployments')
delete_report = _delete_object_method('reports')
delete_token = _delete_object_method('tokens')
delete_workflow = _delete_object_method('workflows')
def handle_resp(self, resp, expected_status_code=200):
try:
output = resp.json()
except ValueError:
            raise APImetricsError(resp)
if resp.status_code == expected_status_code:
return output
raise APImetricsError(output.get('error_msg', output))
def _ask_user_for_pick(self, object_name, output):
object_id = None
if output:
selected = -1
if not self.always_use_existing and not self.always_create_new:
print('0: Create new {}'.format(object_name))
for i, service in enumerate(output):
print("{}: {}".format(i+1, service.get('name', 'NAME?')))
selected = -1
while selected < 0 or selected > len(output):
inp_str = input('Enter number for {} to use: '.format(object_name))
try:
selected = int(inp_str)
except (ValueError, TypeError):
selected = -1
selected -= 1
elif self.always_use_existing:
selected = 0
if selected >= 0:
object_id = output[selected]['id']
return object_id
def list_results(self, call_id, **kwargs):
url = '{url}/calls/{call_id}/results/'
resp = requests.get(
url.format(url=self.api_base_url, call_id=call_id),
headers=self.headers,
params=kwargs)
return self.handle_resp(resp)
# Calling this could take a *LONG* time
def list_all_results(self, call_id, **kwargs):
output = {'results':[], 'meta': {'more': False, 'next_cursor': None}}
more = True
cursor = None
while more:
resp = self.list_results(call_id, cursor=cursor, **kwargs)
more = resp['meta']['more']
cursor = resp['meta']['next_cursor']
output['results'].extend(resp['results'])
return output | APImetrics | /APImetrics-0.2.2.tar.gz/APImetrics-0.2.2/apimetrics/api.py | api.py |
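# Usage sketch (illustrative, not part of the library): the generated
# list/get/create/update/delete helpers above can be called directly once an
# API key is available. The key below is a placeholder.
from apimetrics.api import APImetricsAPI

api = APImetricsAPI(apimetrics_key='YOUR_API_KEY')
calls = api.list_all_calls()
for call in calls['results']:
    print(call['id'], call['meta']['name'])
# Paged results for a single API call; list_all_results() walks every page
if calls['results']:
    first_page = api.list_results(call_id=calls['results'][0]['id'])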
from __future__ import print_function
import logging
import sys
import os
import io
import json
import argparse
from six.moves import input, configparser
from apimetrics.errors import APImetricsError
from apimetrics.api import APImetricsAPI
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s:%(levelname)s: %(message)s',
level=os.environ.get('DEBUG_LEVEL') or logging.INFO)
log = logging.getLogger(__name__) # pylint: disable=C0103
# Generic class for handling command-line interaction with the APImetrics API
class APImetricsCLI(object):
def __init__(self, api_class=APImetricsAPI):
self._args = None
self.parser = self.get_argument_parser()
apimetrics_args = self.get_apimetrics_args()
self.api = api_class(**apimetrics_args)
@property
def args(self):
if not self._args:
self._args = vars(self.parser.parse_args())
return self._args
# Override this method to add more arguments to your script
def get_argument_parser(self):
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
apim_group = parser.add_argument_group('apimetrics', 'APImetrics settings')
apim_group.add_argument('--apimetrics', '-a', help='Set the APImetrics key to use')
apim_group.add_argument('--config', '-cfg', help='Set the config file to use')
apim_group.add_argument('--simulate', '-s', help='Simulate - don\'t call the APImetrics API', action="store_true")
return parser
def get_apimetrics_args(self):
config_file, config = self.open_config_file()
apimetrics_key = config.get('APImetrics', 'apimetrics_key') if config_file else None
apimetrics_args = {
'apimetrics_key': self.args.get('apimetrics') or apimetrics_key,
'api_base_url': config.get('APImetrics', 'base_url') if config.has_option('APImetrics', 'base_url') else None,
'simulate': self.args.get('simulate') or False,
}
if config_file and apimetrics_args['apimetrics_key'] and apimetrics_args['apimetrics_key'] != apimetrics_key:
            with open(config_file, 'w') as config_fp:
                config.set('APImetrics', 'apimetrics_key', apimetrics_args['apimetrics_key'])
                config.write(config_fp)
return apimetrics_args
def open_config_file(self):
config_file = ['/etc/APImetrics', os.path.expanduser('~/.APImetrics'), 'apimetrics.ini']
default_config = "[APImetrics]\napimetrics_key = "
cfg = configparser.ConfigParser(allow_no_value=True)
if sys.version_info[0] >= 3:
cfg.readfp(io.StringIO(default_config))
else:
cfg.readfp(io.BytesIO(default_config))
if self.args['config']:
config_file = self.args['config']
success_files = cfg.read(config_file)
if success_files:
config_file = success_files[-1]
else:
            log.warning("Unable to find any config files to open!")
config_file = None
return config_file, cfg
# Class to handle the commands for this specific script
class APImetricsScript(APImetricsCLI):
# Overriding APImetricsCLI to add our command-line specific commands
def get_argument_parser(self):
def add_common_args(parser):
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', '-l', help="List objects", action="store_true")
group.add_argument('--create', '-c', help="Create object", action="store_true")
group.add_argument('--read', '-r', '--view', help="Read object as JSON by id")
group.add_argument('--update', '-u', help="Update object by id")
group.add_argument('--delete', '-d', help="Delete object by id")
#parser.add_argument('--deploy', '-p', help="Deploy objects")
#parser.add_argument('--results', '-r', help="View API call results")
parser = super(APImetricsScript, self).get_argument_parser()
subparsers = parser.add_subparsers(help='sub-command help')
# create the parser for the models
for model in ('auth', 'call', 'deployment', 'report', 'token', 'workflow', 'alert', 'notification'):
sub_parser = subparsers.add_parser(model, help='{} help'.format(model))
sub_parser.set_defaults(command_type=model)
add_common_args(sub_parser)
return parser
    # Additional commands for the command line
def get_script_args(self):
command_type = self.args.get('command_type')
command = None
command_opt = None
command_opts = ('list', 'create', 'read', 'update', 'delete')
for cmd in command_opts:
if self.args.get(cmd, None):
command = cmd
command_opt = self.args.get(cmd)
break
return command_type, command, command_opt
def list(self, command_type, _, **kwargs):
if command_type not in ['auth']:
func = getattr(self.api, 'list_all_{}s'.format(command_type))
else:
func = getattr(self.api, 'list_all_{}'.format(command_type))
resp = func(**kwargs)
for i, obj in enumerate(resp['results']):
if command_type != 'deployment':
print(u'{index}: {id} - {name}'.format(index=i+1, id=obj['id'], name=obj.get('meta', {}).get('name')))
else:
print(u'{index}: {id} - {target_id} @{frequency}m +{run_delay}s'.format(index=i+1, id=obj.get('id'), **obj.get('deployment')))
def create(self, command_type, _, **kwargs):
string_input = u'\n'.join([x for x in sys.stdin])
print(string_input)
obj = json.loads(string_input)
func = getattr(self.api, 'create_{}'.format(command_type))
resp = func(obj, **kwargs)
print(json.dumps(resp, indent=2))
def read(self, command_type, command_opt, **kwargs):
func = getattr(self.api, 'get_{}'.format(command_type))
resp = func(command_opt, **kwargs)
print(json.dumps(resp, indent=2))
return resp
def update(self, command_type, command_opt, **kwargs):
string_input = u''.join([x for x in sys.stdin])
print(string_input)
try:
obj = json.loads(string_input)
        except ValueError:
raise APImetricsError('Input is not JSON')
func = getattr(self.api, 'update_{}'.format(command_type))
resp = func(command_opt, obj, **kwargs)
print(json.dumps(resp, indent=2))
    def delete(self, command_type, command_opt, **kwargs):
        # read() returns a single object by id, so confirm and delete that one
        resp = self.read(command_type, command_opt, **kwargs)
        func = getattr(self.api, 'delete_{}'.format(command_type))
        inp_str = input('Enter "YES" to confirm that you want to delete "{}": '.format(resp.get('id', command_opt)))
        if inp_str == "YES":
            resp2 = func(command_opt, **kwargs)
            print(json.dumps(resp2, indent=2))
def deploy(self, command_type, command_opt, **kwargs):
if command_type in ['call', 'workflow']:
resp = self.read(command_type, command_opt, **kwargs)
run_delay = 10
for i, obj in enumerate(resp['results']):
resp2 = self.api.create_deployment(
{
'target_key': obj['id'],
'remote_location': '',
'frequency': 60*6,
'run_delay': run_delay
})
run_delay += 10
print(u'{}: {}'.format(i, resp2['run_delay']))
def results(self, command_type, _, **kwargs):
more = True
cursor = None
print('[\n')
while more:
resp = self.api.list_results(call_id=command_type, cursor=cursor, **kwargs)
more = resp['meta']['more']
cursor = resp['meta']['next_cursor']
strings = []
for result in resp['results']:
strings.append(json.dumps(result, indent=2))
print(u',\n'.join(strings))
if more:
print(',\n') # Keep JSON valid
print(']\n')
def run(self, **kwargs):
command_type, command, command_opt = self.get_script_args()
print('Command {}, type {}'.format(command, command_type))
command_fn = getattr(self, command)
command_fn(command_type, command_opt, **kwargs)
def main():
cli = APImetricsScript()
try:
cli.run()
except APImetricsError as ex:
print("ERROR: {}".format(ex), file=sys.stderr) | APImetrics | /APImetrics-0.2.2.tar.gz/APImetrics-0.2.2/apimetrics/cli.py | cli.py |
from __future__ import print_function
import logging
import os
import re
import sys
from six.moves import input
import apimetrics
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s:%(levelname)s: %(message)s',
level=os.environ.get('DEBUG_LEVEL') or logging.INFO)
log = logging.getLogger(__name__) # pylint: disable=C0103
class DeploymentsRemover(apimetrics.APImetricsCLI):
# Overriding APImetricsCLI to add our command-line specific commands
def get_argument_parser(self):
parser = super(DeploymentsRemover, self).get_argument_parser()
parser.add_argument('--non-interactive', '-y', help="Interactive mode, ask for each API call", action="store_true", default=False)
parser.add_argument('--name', '-n', help="Only APIs which match this name")
return parser
def ask_user_about_call(self, call):
if self.args.get('name'):
api_name = call['meta']['name']
if not re.search(self.args.get('name'), api_name):
return False
if not self.args.get('non_interactive'):
inp_str = input('Delete deployments for API call "{name}"? y/N: '.format(**call.get('meta')))
return inp_str.lower() == 'y'
return True
def run(self, **kwargs):
list_of_calls = self.api.list_all_calls(**kwargs)
for call in list_of_calls['results']:
if self.ask_user_about_call(call):
deployments = self.api.list_deployments_by_call(call=call['id'], **kwargs)
for deployment in deployments['results']:
print('Deleting deployment {location_id} for api {name}...'.format(name=call['meta']['name'], **deployment.get('deployment')), end='\t\t')
self.api.delete_deployment(deployment['id'], **kwargs)
print('OK')
def main():
cli = DeploymentsRemover()
try:
cli.run()
except apimetrics.APImetricsError as ex:
print("ERROR: {}".format(ex), file=sys.stderr)
if __name__ == '__main__':
main() | APImetrics | /APImetrics-0.2.2.tar.gz/APImetrics-0.2.2/apimetrics/scripts/delete_deployments.py | delete_deployments.py |
from __future__ import print_function
import logging
import os
import sys
import itertools
import math
import apimetrics
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s:%(levelname)s: %(message)s',
level=os.environ.get('DEBUG_LEVEL') or logging.INFO)
log = logging.getLogger(__name__) # pylint: disable=C0103
def _index_of_first(lst, pred):
for i, item in enumerate(lst):
if pred(item):
return i
return None
class TidyDeploymentsScript(apimetrics.APImetricsCLI): # pylint: disable=R0903
def run(self, **kwargs): # pylint: disable=R0914
deployments = self.api.list_all_deployments(**kwargs).get('results')
all_deployments = sorted(deployments, key=lambda deploy: deploy.get('deployment', {}).get('run_delay'))
frequencies = sorted(set(deploy.get('deployment', {}).get('frequency') for deploy in deployments))
for freq in frequencies:
log.info('Frequency: %d', freq)
deployments = [deploy for deploy in all_deployments if deploy.get('deployment', {}).get('frequency') == freq]
locations = sorted(set(deploy.get('deployment', {}).get('location_id') for deploy in deployments if deploy.get('deployment', {}).get('frequency') == freq))
targets = sorted(set(deploy.get('deployment', {}).get('target_id') for deploy in deployments if deploy.get('deployment', {}).get('frequency') == freq))
loc_len = len(locations)
trg_len = len(targets)
dep_len = len(deployments)
total = loc_len * trg_len
log.debug("Locations: %s", locations)
log.debug("Targets: %s", [t[-8:] for t in targets])
combos = list(itertools.product(locations, targets))
output = []
log.debug("Total: %d, Potential Max: %d", dep_len, total)
while combos:
j = 0
for i in range(total):
trg_ind = i % trg_len
combo = None
while combo not in combos:
loc_ind = j % loc_len
combo = (locations[loc_ind], targets[trg_ind])
j += 1
if combo in combos:
def index_of_matching(loc, trg):
def match(deploy):
return deploy.get('deployment', {}).get('location_id') == loc and deploy.get('deployment', {}).get('target_id') == trg
return match
deploy_ind = _index_of_first(deployments, index_of_matching(*combo))
if deploy_ind is not None:
# print(deploy_ind, combo)
output.append(deployments[deploy_ind])
else:
log.debug('SKIP %s', combo)
combos.remove(combo)
j += 1
gap_per_deploy = (freq * 60) / float(dep_len + 1) # Skip the 0 run_delay for everywhere
for index0, deploy in enumerate(output):
index = index0 + 1
info = deploy.get('deployment', {})
location_id = info.get('location_id')
target_id = info.get('target_id')
new_run_delay = int(math.ceil(index * gap_per_deploy))
freq = info.get('frequency')
info_str = "ID: {} \t Freq: {} \t {} -> {} \t {} \t {}".format(deploy['id'][-8:], freq, info['run_delay'], new_run_delay, target_id[-8:], location_id)
log.info(info_str)
if new_run_delay != info['run_delay']:
self.api.update_deployment(
obj_id=deploy['id'],
obj={
'deployment': {
'run_delay': new_run_delay
}
},
**kwargs)
def main():
cli = TidyDeploymentsScript()
try:
cli.run()
except apimetrics.APImetricsError as ex:
print("ERROR: {}".format(ex), file=sys.stderr)
if __name__ == '__main__':
main() | APImetrics | /APImetrics-0.2.2.tar.gz/APImetrics-0.2.2/apimetrics/scripts/spread_out_deployments.py | spread_out_deployments.py |
from __future__ import print_function
import logging
import os
import sys
import math
import random
import re
from six.moves import input
import apimetrics
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s:%(levelname)s: %(message)s',
level=os.environ.get('DEBUG_LEVEL') or logging.INFO)
log = logging.getLogger(__name__) # pylint: disable=C0103
class DeploymentCreator(apimetrics.APImetricsCLI):
# Overriding APImetricsCLI to add our command-line specific commands
def get_argument_parser(self):
parser = super(DeploymentCreator, self).get_argument_parser()
parser.add_argument('location_ids', metavar='LOC', nargs='+', help="Location ID to deploy to")
parser.add_argument('--frequency', '-f', type=int, help="Frequency to make API call (minutes)")
parser.add_argument('--interactive', '-i', help="Interactive mode, ask for each API call", action="store_true")
parser.add_argument('--name', '-n', help="Only APIs which match this name")
return parser
def ask_user_about_call(self, call):
if self.args.get('name'):
api_name = call['meta']['name']
if not re.search(self.args.get('name'), api_name):
return False
if self.args.get('interactive'):
inp_str = input('Change deployments for Workflow "{name}"? y/N: '.format(**call.get('meta')))
return inp_str.lower() == 'y'
return True
def run(self, **kwargs):
list_of_calls = self.api.list_all_workflows(**kwargs)
locations = list(self.args['location_ids'])
for call in list_of_calls['results']:
if self.ask_user_about_call(call):
                deployments = self.api.list_deployments_by_workflow(workflow=call['id'], **kwargs)
for deployment in deployments['results']:
print('Deleting old deployment {location_id} for api {name}...'.format(name=call['meta']['name'], **deployment.get('deployment')), end='\t\t')
self.api.delete_deployment(deployment['id'], **kwargs)
print('OK')
# Spread out API calls, avoid exactly on the hour etc
                frequency = self.args.get('frequency') or 10
gap = math.ceil(float(frequency * 60) / (1.0 + len(self.args['location_ids'])))
random.shuffle(locations)
for i, location_id in enumerate(locations):
deployment = {
'deployment': {
'target_id': call['id'],
'location_id': location_id,
'frequency': frequency,
'run_delay': int((i + 1) * gap),
}
}
print('New deployment {location_id} for api {name}, freq {frequency}, delay {run_delay}s...'.format(name=call['meta']['name'], **deployment['deployment']), end='\t\t')
self.api.create_deployment(deployment)
print('OK')
def main():
cli = DeploymentCreator()
try:
cli.run()
except apimetrics.APImetricsError as ex:
print("ERROR: {}".format(ex), file=sys.stderr)
if __name__ == '__main__':
main() | APImetrics | /APImetrics-0.2.2.tar.gz/APImetrics-0.2.2/apimetrics/scripts/deploy_all_workflows.py | deploy_all_workflows.py |
from __future__ import print_function
import logging
import os
import sys
import math
import random
import re
from six.moves import input
import apimetrics
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s:%(levelname)s: %(message)s',
level=os.environ.get('DEBUG_LEVEL') or logging.INFO)
log = logging.getLogger(__name__) # pylint: disable=C0103
class DeploymentCreator(apimetrics.APImetricsCLI):
# Overriding APImetricsCLI to add our command-line specific commands
def get_argument_parser(self):
parser = super(DeploymentCreator, self).get_argument_parser()
parser.add_argument('location_ids', metavar='LOC', nargs='+', help="Location ID to deploy to")
parser.add_argument('--frequency', '-f', type=int, help="Frequency to make API call (minutes)")
parser.add_argument('--interactive', '-i', help="Interactive mode, ask for each API call", action="store_true")
parser.add_argument('--name', '-n', help="Only APIs which match this name")
return parser
def ask_user_about_call(self, call):
if self.args.get('name'):
api_name = call['meta']['name']
if not re.search(self.args.get('name'), api_name):
return False
if self.args.get('interactive'):
inp_str = input('Change deployments for API call "{name}"? y/N: '.format(**call.get('meta')))
return inp_str.lower() == 'y'
return True
def run(self, **kwargs):
list_of_calls = self.api.list_all_calls(**kwargs)
locations = list(self.args['location_ids'])
for call in list_of_calls['results']:
if self.ask_user_about_call(call):
deployments = self.api.list_deployments_by_call(call=call['id'], **kwargs)
for deployment in deployments['results']:
print('Deleting old deployment {location_id} for api {name}...'.format(name=call['meta']['name'], **deployment.get('deployment')), end='\t\t')
self.api.delete_deployment(deployment['id'], **kwargs)
print('OK')
# Spread out API calls, avoid exactly on the hour
                frequency = self.args.get('frequency') or 10
gap = math.ceil(float(frequency * 60) / (1.0 + len(self.args['location_ids'])))
random.shuffle(locations)
for i, location_id in enumerate(locations):
deployment = {
'deployment': {
'target_id': call['id'],
'location_id': location_id,
'frequency': frequency,
'run_delay': int((i + 1) * gap),
}
}
print('New deployment {location_id} for api {name}, freq {frequency}, delay {run_delay}s...'.format(name=call['meta']['name'], **deployment['deployment']), end='\t\t')
self.api.create_deployment(deployment)
print('OK')
def main():
cli = DeploymentCreator()
try:
cli.run()
except apimetrics.APImetricsError as ex:
print("ERROR: {}".format(ex), file=sys.stderr)
if __name__ == '__main__':
main() | APImetrics | /APImetrics-0.2.2.tar.gz/APImetrics-0.2.2/apimetrics/scripts/deploy_all_apis.py | deploy_all_apis.py |
from __future__ import print_function
import logging
import os
import re
import sys
from six.moves import input
import apimetrics
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s:%(levelname)s: %(message)s',
level=os.environ.get('DEBUG_LEVEL') or logging.INFO)
log = logging.getLogger(__name__) # pylint: disable=C0103
class DeploymentsRemover(apimetrics.APImetricsCLI):
# Overriding APImetricsCLI to add our command-line specific commands
def get_argument_parser(self):
parser = super(DeploymentsRemover, self).get_argument_parser()
parser.add_argument('--non-interactive', '-y', help="Interactive mode, ask for each API call", action="store_true", default=False)
parser.add_argument('--name', '-n', help="Only APIs which match this name")
return parser
def ask_user_about_call(self, call):
if self.args.get('name'):
api_name = call['meta']['name']
if not re.search(self.args.get('name'), api_name):
return False
if not self.args.get('non_interactive'):
inp_str = input('Delete deployments for Workflow "{name}"? y/N: '.format(**call.get('meta')))
return inp_str.lower() == 'y'
return True
def run(self, **kwargs):
list_of_calls = self.api.list_all_workflows(**kwargs)
for call in list_of_calls['results']:
if self.ask_user_about_call(call):
                deployments = self.api.list_deployments_by_workflow(workflow=call['id'], **kwargs)
for deployment in deployments['results']:
print('Deleting deployment {location_id} for Workflow {name}...'.format(name=call['meta']['name'], **deployment.get('deployment')), end='\t\t')
self.api.delete_deployment(deployment['id'], **kwargs)
print('OK')
def main():
cli = DeploymentsRemover()
try:
cli.run()
except apimetrics.APImetricsError as ex:
print("ERROR: {}".format(ex), file=sys.stderr)
if __name__ == '__main__':
main() | APImetrics | /APImetrics-0.2.2.tar.gz/APImetrics-0.2.2/apimetrics/scripts/delete_deployments_workflows.py | delete_deployments_workflows.py |
MIT License
Copyright (c) 2023 redphx
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| APIxoo | /APIxoo-0.3.1.tar.gz/APIxoo-0.3.1/LICENSE.md | LICENSE.md |
[](https://pypi.org/project/APIxoo/)
[](https://opensource.org/licenses/MIT)
# APIxoo
Python package to interact with Divoom Pixoo app's server.
Unlike other packages, this one will only focus on interacting with Divoom Pixoo's server.
For ESP32/Arduino, check [redphx/DivoomClient](https://github.com/redphx/DivoomClient).
## Features
- [x] Login
- [x] Decode Divoom's animation formats to GIFs (16x16, 32x32, 64x64).
- [x] Get animation by ID
- [x] Get animations by Category
- [x] Get animations by Album
- [ ] Get animations by User
- [ ] Search animations & users
- [ ] Like/Dislike animation
- [ ] Comment on animation
- [ ] Upload animations
## Install
```
pip install APIxoo
```
## Example
```python
from apixoo import APIxoo, GalleryCategory, GalleryDimension
# Divoom account
EMAIL = '[email protected]'
MD5_PASSWORD = 'deadc0ffee...'
# A plain password string is also accepted via password='password'
api = APIxoo(EMAIL, md5_password=MD5_PASSWORD)
status = api.log_in()
if not status:
print('Login error!')
else:
files = api.get_category_files(
GalleryCategory.RECOMMEND,
dimension=GalleryDimension.W64H64,
page=1,
per_page=20,
)
for info in files:
print(info)
pixel_bean = api.download(info)
if pixel_bean:
pixel_bean.save_to_gif(f'{info.gallery_id}.gif', scale=5)
```
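A decoded animation can also be inspected frame by frame (a small sketch reusing the `PixelBean` returned by `api.download()` above; the output file name is arbitrary):

```python
pixel_bean = api.download(info)
if pixel_bean:
    print(pixel_bean.total_frames, pixel_bean.width, pixel_bean.height)
    # Frame numbers are 1-based; save the first frame as an upscaled PNG
    pixel_bean.get_frame_image(1, target_width=320).save(f'{info.gallery_id}_frame1.png')
```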
*To be updated...*
| APIxoo | /APIxoo-0.3.1.tar.gz/APIxoo-0.3.1/README.md | README.md |
from enum import Enum
class GalleryCategory(int, Enum):
NEW = 0
DEFAULT = 1
# LED_TEXT = 2
CHARACTER = 3
EMOJI = 4
DAILY = 5
NATURE = 6
SYMBOL = 7
PATTERN = 8
CREATIVE = 9
PHOTO = 12
TOP = 14
GADGET = 15
BUSINESS = 16
FESTIVAL = 17
RECOMMEND = 18
# PLANET = 19
FOLLOW = 20
# REVIEW_PHOTOS = 21
# REVIEW_STOLEN_PHOTOS = 22
# FILL_GAME = 29
PIXEL_MATCH = 30 # Current event
PLANT = 31
ANIMAL = 32
PERSON = 33
EMOJI_2 = 34
FOOD = 35
# OTHERS = 36
# REPORT_PHOTO = 254
# CREATION_ALBUM = 255
class GalleryType(int, Enum):
PICTURE = 0
ANIMATION = 1
MULTI_PICTURE = 2
MULTI_ANIMATION = 3
LED = 4
ALL = 5
SAND = 6
DESIGN_HEAD_DEVICE = 101
DESIGN_IMPORT = 103
DESIGN_CHANNEL_DEVICE = 104
class GallerySorting(int, Enum):
NEW_UPLOAD = 0
MOST_LIKED = 1
class GalleryDimension(int, Enum):
W16H16 = 1
W32H32 = 2
W64H64 = 4
ALL = 15
class Server(str, Enum):
API = 'app.divoom-gz.com'
FILE = 'f.divoom-gz.com'
class ApiEndpoint(str, Enum):
GET_ALBUM_LIST = '/Discover/GetAlbumList'
GET_ALBUM_FILES = '/Discover/GetAlbumImageList'
GET_CATEGORY_FILES = '/GetCategoryFileListV2'
GET_GALLERY_INFO = '/Cloud/GalleryInfo'
USER_LOGIN = '/UserLogin'
class BaseDictInfo(dict):
_KEYS_MAP = {}
def __init__(self, info: dict):
# Rename keys
for key in self._KEYS_MAP:
self.__dict__[self._KEYS_MAP[key]] = info.get(key)
# Make this object JSON serializable
dict.__init__(self, **self.__dict__)
def __setattr__(self, name, value):
raise Exception('%s object is read only!' % (type(self).__name__))
class AlbumInfo(BaseDictInfo):
_KEYS_MAP = {
'AlbumId': 'album_id',
'AlbumName': 'album_name',
'AlbumImageId': 'album_image_id',
'AlbumBigImageId': 'album_big_image_id',
}
class UserInfo(BaseDictInfo):
_KEYS_MAP = {
'UserId': 'user_id',
'UserName': 'user_name',
}
class GalleryInfo(BaseDictInfo):
_KEYS_MAP = {
'Classify': 'category',
'CommentCnt': 'total_comments',
'Content': 'content',
'CopyrightFlag': 'copyright_flag',
'CountryISOCode': 'country_iso_code',
'Date': 'date',
'FileId': 'file_id',
'FileName': 'file_name',
'FileTagArray': 'file_tags',
'FileType': 'file_type',
'FileURL': 'file_url',
'GalleryId': 'gallery_id',
'LikeCnt': 'total_likes',
'ShareCnt': 'total_shares',
'WatchCnt': 'total_views',
# 'AtList': [],
# 'CheckConfirm': 2,
# 'CommentUTC': 0,
# 'FillGameIsFinish': 0,
# 'FillGameScore': 0,
# 'HideFlag': 0,
# 'IsAddNew': 1,
# 'IsAddRecommend': 0,
# 'IsDel': 0,
# 'IsFollow': 0,
# 'IsLike': 0,
# 'LayerFileId': '',
# 'Level': 7,
# 'LikeUTC': 1682836986,
# 'MusicFileId': '',
# 'OriginalGalleryId': 0,
# 'PixelAmbId': '',
# 'PixelAmbName': '',
# 'PrivateFlag': 0,
# 'RegionId': '55',
# 'UserHeaderId': 'group1/M00/1B/BF/...',
}
def __init__(self, info: dict):
super().__init__(info)
# Parse user info
self.__dict__['user'] = None
if 'UserId' in info:
self.__dict__['user'] = UserInfo(info)
# Update dict
dict.__init__(self, **self.__dict__) | APIxoo | /APIxoo-0.3.1.tar.gz/APIxoo-0.3.1/apixoo/const.py | const.py |
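# Usage sketch (illustrative): GalleryInfo maps the raw Divoom API keys listed
# in _KEYS_MAP above to snake_case attributes while staying JSON serializable.
# The field values below are made up for demonstration.
from apixoo.const import GalleryInfo

raw = {
    'GalleryId': 123456,
    'FileName': 'demo',
    'LikeCnt': 42,
    'UserId': 1001,
    'UserName': 'redphx',
}
info = GalleryInfo(raw)
print(info.gallery_id, info.file_name, info.total_likes)  # 123456 demo 42
print(info.user.user_name)  # redphx
# Instances are read-only: assigning to info.gallery_id raises an Exception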
from typing import Union
from PIL import Image
class PixelBean(object):
@property
def total_frames(self):
return self._total_frames
@property
def speed(self):
return self._speed
@property
def row_count(self):
return self._row_count
@property
def column_count(self):
return self._column_count
@property
def palettes(self):
return self._palettes
@property
def frames_data(self):
return self._frames_data
@property
def width(self):
return self._width
@property
def height(self):
return self._height
def __init__(
self,
total_frames: int,
speed: int,
row_count: int,
column_count: int,
palettes: list,
frames_data: list,
):
self._total_frames = total_frames
self._speed = speed
self._row_count = row_count
self._column_count = column_count
self._palettes = palettes
self._frames_data = frames_data
self._width = column_count * 16
self._height = row_count * 16
def _resize(
self,
img: Image,
scale: Union[int, float] = 1,
target_width: int = None,
target_height: int = None,
) -> Image:
"""Resize frame image"""
# Default params -> don't do anything
if scale == 1 and not target_width and not target_height:
return img
org_width = img.width
org_height = img.height
width = img.width
height = img.height
if scale != 1:
width = round(width * scale)
height = round(height * scale)
elif target_width or target_height:
# Set specific width/height
if target_width and target_height:
width = target_width
height = target_height
elif target_width and not target_height:
width = target_width
height = round((target_width * org_height) / org_width)
elif target_height and not target_width:
width = round((target_height * org_width) / org_height)
height = target_height
# Resize image if needed
if width != org_width or height != org_height:
img = img.resize((width, height), Image.NEAREST)
return img
def get_frame_image(
self,
frame_number: int,
scale: Union[int, float] = 1,
target_width: int = None,
target_height: int = None,
) -> Image:
"""Get Pillow Image of a frame"""
if frame_number <= 0 or frame_number > self.total_frames:
raise Exception('Frame number out of range!')
frame_data = self._frames_data[frame_number - 1]
img = Image.new('RGB', (self._width, self._height))
for y in range(self._row_count * 16):
for x in range(self.column_count * 16):
palette_index = frame_data[y][x]
rgb = self._palettes[palette_index]
img.putpixel((x, y), rgb)
img = self._resize(
img, scale=scale, target_width=target_width, target_height=target_height
)
return img
def save_to_gif(
self,
output_path: str,
scale: Union[int, float] = 1,
target_width: int = None,
target_height: int = None,
) -> None:
"""Convert animation to GIF file"""
gif_frames = []
for frame_number in range(self._total_frames):
img = self.get_frame_image(
frame_number + 1,
scale=scale,
target_width=target_width,
target_height=target_height,
)
gif_frames.append(img)
# Save to GIF
gif_frames[0].save(
output_path,
append_images=gif_frames[1:],
duration=self._speed,
save_all=True,
optimize=False,
interlace=False,
loop=0,
disposal=0,
) | APIxoo | /APIxoo-0.3.1.tar.gz/APIxoo-0.3.1/apixoo/pixel_bean.py | pixel_bean.py |
import json
from enum import Enum
from io import IOBase
from struct import unpack
import lzo
from Crypto.Cipher import AES
from .pixel_bean import PixelBean
class FileFormat(Enum):
PIC_MULTIPLE = 17
ANIM_SINGLE = 9 # 16x16
ANIM_MULTIPLE = 18 # 32x32 or 64x64
ANIM_MULTIPLE_64 = 26 # 64x64, new format
class BaseDecoder(object):
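    """Common base class for the format-specific decoders; it wraps the input
    stream and provides AES-CBC decryption of the encrypted payload."""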
AES_SECRET_KEY = '78hrey23y28ogs89'
AES_IV = '1234567890123456'.encode('utf8')
def __init__(self, fp: IOBase):
self._fp = fp
    def decode(self) -> PixelBean:
        raise NotImplementedError('Not implemented!')
def _decrypt_aes(self, data):
cipher = AES.new(
self.AES_SECRET_KEY.encode('utf8'),
AES.MODE_CBC,
self.AES_IV,
)
return cipher.decrypt(data)
def _compact(self, frames_data, total_frames, row_count=1, column_count=1):
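        """Convert raw per-frame RGB byte strings into a shared color palette and
        per-frame grids of palette indices laid out on the full row/column pixel grid."""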
frame_size = row_count * column_count * 16 * 16 * 3
palettes = []
frames_compact = json.loads(
json.dumps(
[[[None] * (column_count * 16)] * (row_count * 16)] * total_frames
)
)
for current_frame, frame_data in enumerate(frames_data):
pos = 0
x = 0
y = 0
grid_x = 0
grid_y = 0
while pos < frame_size:
r, g, b = unpack('BBB', frame_data[pos : pos + 3])
rgb = (r, g, b)
try:
palette_index = palettes.index(rgb)
except ValueError:
palettes.append(rgb)
palette_index = len(palettes) - 1
real_x = x + (grid_x * 16)
real_y = y + (grid_y * 16)
frames_compact[current_frame][real_y][real_x] = palette_index
x += 1
pos += 3
if (pos / 3) % 16 == 0:
x = 0
y += 1
if (pos / 3) % 256 == 0:
x = 0
y = 0
grid_x += 1
if grid_x == row_count:
grid_x = 0
grid_y += 1
return (palettes, frames_compact)
class AnimSingleDecoder(BaseDecoder):
def decode(self) -> PixelBean:
content = b'\x00' + self._fp.read() # Add back the first byte (file type)
# Re-arrange data
        # Skip the 4-byte header (format byte placeholder, flag byte and speed);
        # the remainder is the AES-encrypted frame data
        encrypted_data = content[4:]
row_count = 1
column_count = 1
speed = unpack('>H', content[2:4])[0]
# Decrypt AES
decrypted_data = self._decrypt_aes(encrypted_data)
total_frames = len(decrypted_data) // 768
# Parse frames data
frames_data = []
for i in range(total_frames):
pos = i * 768
frames_data.append(decrypted_data[pos : pos + 768])
# Compact data
palettes, frames_compact = self._compact(frames_data, total_frames)
return PixelBean(
total_frames,
speed,
row_count,
column_count,
palettes,
frames_compact,
)
class AnimMultiDecoder(BaseDecoder):
def decode(self) -> PixelBean:
total_frames, speed, row_count, column_count = unpack('>BHBB', self._fp.read(5))
encrypted_data = self._fp.read()
return self._decode_frames_data(
encrypted_data, total_frames, speed, row_count, column_count
)
def _decode_frames_data(
self, encrypted_data, total_frames, speed, row_count, column_count
):
width = 16 * column_count
height = 16 * row_count
data = self._decrypt_aes(encrypted_data)
uncompressed_frame_size = width * height * 3
pos = 0
        frames_data = []
for current_frame in range(total_frames):
frame_size = unpack('>I', data[pos : pos + 4])[0]
pos += 4
frame_data = lzo.decompress(
data[pos : pos + frame_size], False, uncompressed_frame_size
)
pos += frame_size
frames_data.append(frame_data)
palettes, frames_compact = self._compact(
frames_data, total_frames, row_count, column_count
)
return PixelBean(
total_frames,
speed,
row_count,
column_count,
palettes,
frames_compact,
)
class AnimMulti64Decoder(BaseDecoder):
def _get_dot_info(self, data, pos, pixel_idx, bVar9):
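        # Extracts the palette index of pixel `pixel_idx` from the bit-packed frame data
        # starting at `pos`; bVar9 is the number of bits stored per pixel. Variable names
        # are kept from the original low-level routine.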
if not data[pos:]:
return -1
uVar2 = bVar9 * pixel_idx & 7
uVar4 = bVar9 * pixel_idx * 65536 >> 0x13
if bVar9 < 9:
uVar3 = bVar9 + uVar2
if uVar3 < 9:
uVar6 = data[pos + uVar4] << (8 - uVar3 & 0xFF) & 0xFF
uVar6 >>= uVar2 + (8 - uVar3) & 0xFF
else:
uVar6 = data[pos + uVar4 + 1] << (0x10 - uVar3 & 0xFF) & 0xFF
uVar6 >>= 0x10 - uVar3 & 0xFF
uVar6 &= 0xFFFF
uVar6 <<= 8 - uVar2 & 0xFF
uVar6 |= data[pos + uVar4] >> uVar2
else:
raise Exception('(2) Unimplemented')
return uVar6
def _decode_frame_data(self, data):
output = [None] * 12288
encrypt_type = data[5]
if encrypt_type != 0x0C:
raise Exception('Unsupported %s' % encrypt_type)
uVar13 = data[6]
iVar11 = uVar13 * 3
if uVar13 == 0:
bVar9 = 8
iVar11 = 768 # Fix corrupted frame
else:
bVar9 = 0xFF
bVar15 = 1
while True:
if (uVar13 & 1) != 0:
bVar18 = bVar9 == 0xFF
bVar9 = bVar15
if bVar18:
bVar9 = bVar15 - 1
uVar14 = uVar13 & 0xFFFE
bVar15 = bVar15 + 1
uVar13 = uVar14 >> 1
if uVar14 == 0:
break
pixel_idx = 0
pos = (iVar11 + 8) & 0xFFFF
while True:
color_index = self._get_dot_info(data, pos, pixel_idx & 0xFFFF, bVar9)
target_pos = pixel_idx * 3
if color_index == -1: # transparent -> black
output[target_pos] = 0
output[target_pos + 1] = 0
output[target_pos + 2] = 0
else:
color_pos = 8 + color_index * 3
output[target_pos] = data[color_pos]
output[target_pos + 1] = data[color_pos + 1]
output[target_pos + 2] = data[color_pos + 2]
pixel_idx += 1
if pixel_idx == 4096: # 64x64
break
return bytearray(output)
def decode(self) -> PixelBean:
total_frames, speed, row_count, column_count = unpack('>BHBB', self._fp.read(5))
        frames_data = []
for frame in range(total_frames):
size = unpack('>I', self._fp.read(4))[0]
frame_data = self._decode_frame_data(self._fp.read(size))
frames_data.append(frame_data)
palettes, frames_compact = self._compact(
frames_data, total_frames, row_count, column_count
)
return PixelBean(
total_frames,
speed,
row_count,
column_count,
palettes,
frames_compact,
)
class PicMultiDecoder(BaseDecoder):
def decode(self) -> PixelBean:
row_count, column_count, length = unpack('>BBI', self._fp.read(6))
encrypted_data = self._fp.read()
width = 16 * column_count
height = 16 * row_count
uncompressed_frame_size = width * height * 3
data = self._decrypt_aes(encrypted_data)
frame_data = lzo.decompress(data[:length], False, uncompressed_frame_size)
frames_data = [frame_data]
total_frames = 1
speed = 40
palettes, frames_compact = self._compact(
frames_data, total_frames, row_count, column_count
)
# TODO: support showing string when remaning length > 20
return PixelBean(
total_frames,
speed,
row_count,
column_count,
palettes,
frames_compact,
)
class PixelBeanDecoder(object):
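    """Reads the leading format byte of a file or stream and dispatches to the matching decoder."""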
    @staticmethod
    def decode_file(file_path: str) -> PixelBean:
with open(file_path, 'rb') as fp:
return PixelBeanDecoder.decode_stream(fp)
    @staticmethod
    def decode_stream(fp: IOBase) -> PixelBean:
        file_format = None
        try:
            file_format = unpack('B', fp.read(1))[0]
            file_format = FileFormat(file_format)
        except Exception:
            print(f'Unsupported file format: {file_format}')
            return None
if file_format == FileFormat.ANIM_SINGLE:
return AnimSingleDecoder(fp).decode()
elif file_format == FileFormat.ANIM_MULTIPLE:
return AnimMultiDecoder(fp).decode()
elif file_format == FileFormat.PIC_MULTIPLE:
return PicMultiDecoder(fp).decode()
elif file_format == FileFormat.ANIM_MULTIPLE_64:
return AnimMulti64Decoder(fp).decode() | APIxoo | /APIxoo-0.3.1.tar.gz/APIxoo-0.3.1/apixoo/pixel_bean_decoder.py | pixel_bean_decoder.py |
import hashlib
from typing import Union
import requests
from .const import (
AlbumInfo,
ApiEndpoint,
GalleryCategory,
GalleryDimension,
GalleryInfo,
GallerySorting,
GalleryType,
Server,
)
from .pixel_bean import PixelBean
from .pixel_bean_decoder import PixelBeanDecoder
class APIxoo(object):
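    """API client: handles login, gallery and album queries, and downloading of pixel animations."""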
HEADERS = {
'User-Agent': 'Aurabox/3.1.10 (iPad; iOS 14.8; Scale/2.00)',
}
def __init__(
self, email: str, password: str = None, md5_password: str = None, is_secure=True
):
# Make sure at least one password param is passed
if not any([password, md5_password]):
raise Exception('Empty password!')
# Get MD5 hash of password
if password:
            md5_password = hashlib.md5(password.encode('utf-8')).hexdigest()
self._email = email
self._md5_password = md5_password
self._user = None
self._request_timeout = 10
self._is_secure = is_secure
def _full_url(self, path: str, server: Server = Server.API) -> str:
"""Generate full URL from path"""
if not path.startswith('/'):
path = '/' + path
protocol = 'https://' if self._is_secure else 'http://'
return '%s%s%s' % (protocol, server.value, path)
    def _send_request(self, endpoint: ApiEndpoint, payload: dict = None):
        """Send a request to the API server"""
        if payload is None:
            payload = {}
        if endpoint != ApiEndpoint.USER_LOGIN:
payload.update(
{
'Token': self._user['token'],
'UserId': self._user['user_id'],
}
)
full_url = self._full_url(endpoint.value, Server.API)
resp = requests.post(
full_url,
headers=self.HEADERS,
json=payload,
timeout=self._request_timeout,
)
return resp.json()
def set_timeout(self, timeout: int):
"""Set request timeout"""
self._request_timeout = timeout
def is_logged_in(self) -> bool:
"""Check if logged in or not"""
return self._user is not None
def log_in(self) -> bool:
"""Log in to API server"""
if self.is_logged_in():
return True
payload = {
'Email': self._email,
'Password': self._md5_password,
}
try:
resp_json = self._send_request(ApiEndpoint.USER_LOGIN, payload)
self._user = {
'user_id': resp_json['UserId'],
'token': resp_json['Token'],
}
return True
except Exception:
pass
return False
def get_gallery_info(self, gallery_id: int) -> GalleryInfo:
"""Get gallery info by ID"""
if not self.is_logged_in():
raise Exception('Not logged in!')
payload = {
'GalleryId': gallery_id,
}
try:
resp_json = self._send_request(ApiEndpoint.GET_GALLERY_INFO, payload)
if resp_json['ReturnCode'] != 0:
return None
# Add gallery ID since it isn't included in the response
resp_json['GalleryId'] = gallery_id
return GalleryInfo(resp_json)
except Exception:
return None
def get_category_files(
self,
category: Union[int, GalleryCategory],
dimension: GalleryDimension = GalleryDimension.W32H32,
sort: GallerySorting = GallerySorting.MOST_LIKED,
file_type: GalleryType = GalleryType.ALL,
page: int = 1,
per_page: int = 20,
) -> list:
"""Get a list of galleries by Category"""
if not self.is_logged_in():
raise Exception('Not logged in!')
start_num = ((page - 1) * per_page) + 1
end_num = start_num + per_page - 1
payload = {
'StartNum': start_num,
'EndNum': end_num,
'Classify': category,
'FileSize': dimension,
'FileType': file_type,
'FileSort': sort,
'Version': 12,
'RefreshIndex': 0,
}
try:
resp_json = self._send_request(ApiEndpoint.GET_CATEGORY_FILES, payload)
lst = []
for item in resp_json['FileList']:
lst.append(GalleryInfo(item))
return lst
except Exception:
return None
def get_album_list(self) -> list:
"""Get Album list in Discover tab"""
if not self.is_logged_in():
raise Exception('Not logged in!')
try:
resp_json = self._send_request(ApiEndpoint.GET_ALBUM_LIST)
if resp_json['ReturnCode'] != 0:
return None
lst = []
for item in resp_json['AlbumList']:
lst.append(AlbumInfo(item))
return lst
except Exception:
return None
def get_album_files(self, album_id: int, page: int = 1, per_page: int = 20):
"""Get a list of galleries by Album"""
start_num = ((page - 1) * per_page) + 1
end_num = start_num + per_page - 1
payload = {
'AlbumId': album_id,
'StartNum': start_num,
'EndNum': end_num,
}
try:
resp_json = self._send_request(ApiEndpoint.GET_ALBUM_FILES, payload)
lst = []
for item in resp_json['FileList']:
lst.append(GalleryInfo(item))
return lst
except Exception:
return None
def download(self, gallery_info: GalleryInfo) -> PixelBean:
"""Download and decode animation"""
url = self._full_url(gallery_info.file_id, server=Server.FILE)
resp = requests.get(
url, headers=self.HEADERS, stream=True, timeout=self._request_timeout
)
return PixelBeanDecoder.decode_stream(resp.raw) | APIxoo | /APIxoo-0.3.1.tar.gz/APIxoo-0.3.1/apixoo/__init__.py | __init__.py |
import asyncio
import copy
import logging
import ssl
from typing import Awaitable, Dict, Hashable, List, Tuple, Union
import aiohttp
from . import parsing
from .entities import PackageBase, PackageVariant, PackageVersion
__all__ = ["package_search_match", "generate_download_url"]
QUERY_URL: str = "https://www.apkmirror.com"
QUERY_PARAMS: Dict[str, str] = {
"post_type": "app_release",
"searchtype": "apk",
"s": "",
"minapi": "true",
}
HEADERS = {
"user-agent": "apksearch APKMirrorSearcher/1.0.0",
}
logger = logging.getLogger(__name__)
async def gather_from_dict(tasks: Dict[Hashable, Awaitable], return_exceptions=False):
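    """Await a dict of awaitables and return a dict mapping the same keys to their results."""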
results = await asyncio.gather(*tasks.values(), return_exceptions=return_exceptions)
return dict(zip(tasks.keys(), results))
def _generate_params_list(packages: List[str]) -> List[Dict[str, str]]:
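    """Build one APKMirror search query-parameter dict per package name."""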
param_list = []
for package in packages:
params = copy.copy(QUERY_PARAMS)
params["s"] = package
param_list.append(params)
return param_list
def package_search(packages: List[str]) -> Dict[str, PackageBase]:
"""Entrypoint for performing the search"""
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(package_search_async(packages))
async def package_search_async(packages: List[str]) -> Dict[str, PackageBase]:
"""Entrypoint for performing the search async"""
search_results = await execute_package_search(packages)
package_defs = parsing.process_search_result(search_results)
logger.debug("Packages found: %s", ",".join(list(package_defs.keys())))
release_defs = await execute_release_info(package_defs)
parsing.process_release_result(release_defs)
variant_defs = await execute_variant_info(package_defs)
parsing.process_variant_result(variant_defs)
return package_defs
async def package_search_match(package_url: str, versions: List[str]) -> PackageBase:
    """Perform a targeted search on a root package page
    :param package_url: URL to the package
    :param versions: Version strings to keep from the package page
    """
package_defs = await execute_package_page([package_url])
package_name = list(package_defs.keys())[0]
for pkg_version in list(package_defs[package_name].versions.keys())[:]:
if pkg_version not in versions:
del package_defs[package_name].versions[pkg_version]
if len(package_defs[package_name].versions) != len(versions):
diff = set(versions).difference(set(package_defs[package_name].versions))
raise RuntimeError("{} is missing {}".format(package_name, diff))
release_defs = await execute_release_info(package_defs)
parsing.process_release_result(release_defs)
return package_defs[package_name]
async def generate_download_url(variant: PackageVariant) -> str:
"""Generates a packages temporary download URL
:param variant: Variant to determine URL
"""
results = await _perform_basic_query([variant.variant_info])
variant_defs = {variant: results[0]}
parsing.process_variant_result(variant_defs)
results = await _perform_basic_query([variant.variant_download_page])
download_results = {variant: results[0]}
parsing.process_variant_download_result(download_results)
return variant.download_url
async def execute_package_search(packages: List[str]) -> List[str]:
"""Perform aiohttp requests to APKMirror
:param list packages: Packages that will be searched for. Each package will generate a new
request
:return: A list of results containing the first page of each package search
:rtype: list
"""
    param_list: List[Dict[str, str]] = _generate_params_list(packages)
return await _perform_search(param_list)
async def execute_package_page(packages: List[str]) -> Dict[str, PackageBase]:
"""Query all root package pages
:param packages: List of root package pages to query
"""
results = await _perform_basic_query(packages)
return parsing.process_package_page(results)
async def execute_release_info(packages: Dict[str, PackageBase]) -> Dict[PackageVersion, str]:
"""Execute all requests related to the package versions
    :param dict packages: Currently found information from the initial search. It will be updated
        in place with the release information found during this step
"""
releases = []
for info in packages.values():
for package_version in info.versions.values():
releases.append(package_version)
return await _perform_dict_lookup(releases)
async def execute_variant_info(packages: Dict[str, PackageBase]) -> Dict[PackageVersion, str]:
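    """Fetch the pages for every architecture variant collected from the found package versions."""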
variants = []
for info in packages.values():
for package_version in info.versions.values():
for arch in package_version.arch.values():
variants.extend(arch)
return await _perform_dict_lookup(variants)
async def gather_release_info(releases: List[PackageBase]) -> Tuple[PackageVersion, PackageVariant, str]:
    # run_until_complete cannot be called from inside a running event loop, so await directly
    results = await _perform_dict_lookup(releases)
    return results
async def _fetch_one(session, url, params):
async with session.get(url, ssl=ssl.SSLContext(), params=params, headers=HEADERS) as response:
logger.debug("About to query %s", response.request_info)
return await response.text()
async def _perform_search(query_params: List[Dict[str, str]]):
loop = asyncio.get_running_loop()
async with aiohttp.ClientSession(loop=loop) as session:
required_urls = [_fetch_one(session, QUERY_URL, param) for param in query_params]
logger.info("About to query %s packages", len(required_urls))
results = await asyncio.gather(
*required_urls,
return_exceptions=True,
)
return results
async def _perform_basic_query(urls: List[str]):
async with aiohttp.ClientSession() as session:
required_urls = [_fetch_one(session, url, {}) for url in urls]
logger.info("About to query %s packages", len(required_urls))
results = await asyncio.gather(
*required_urls,
return_exceptions=True,
)
return results
async def _perform_dict_lookup(requests: List[Union[PackageVersion, PackageVariant]]):
if len(requests) == 0:
return []
if isinstance(requests[0], PackageVersion):
identifier = "releases"
url_attr = "link"
else:
identifier = "variants"
url_attr = "variant_download_page"
loop = asyncio.get_running_loop()
async with aiohttp.ClientSession(loop=loop) as session:
tasks = {}
logger.info("About to query %s %s", len(requests), identifier)
for request in requests:
tasks[request] = _fetch_one(session, getattr(request, url_attr), {})
results = await gather_from_dict(tasks)
return results | APKMirror-Search | /APKMirror_Search-1.0.1-py3-none-any.whl/apksearch/search.py | search.py |
from typing import Tuple
import numpy as np
from numpy.random import default_rng
from sklearn.gaussian_process.kernels import Kernel
from .posterior_approximation import LogLikelihood, laplace_approximation
from .acquisitions import Acquisition
from .gaussian_process import gaussian_process_conditional
from .utils import transfer_id_from_query_to_explored
class ActivePreferenceLearning:
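    """Active preference learning loop: a Gaussian process prior over item utilities is
    updated from past pairwise selections, and an acquisition function scores the
    remaining query items to pick the next comparison."""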
def __init__(
self,
kernel: Kernel,
loglikelihood: LogLikelihood,
acquisition: Acquisition,
random_state: int = 0,
):
self.kernel = kernel
self.loglikelihood = loglikelihood
self.acquisition = acquisition
self.rng = default_rng(random_state)
def query(
self,
X: np.ndarray,
explored_item_idx: np.ndarray,
query_item_idx: np.ndarray,
mu=None,
pair_selections=None,
) -> Tuple[int, int, np.ndarray, np.ndarray, np.ndarray]:
if (
len(explored_item_idx) == 0
): # first query, just pick two randomly from the query set
return self.first_query(
query_item_idx=query_item_idx,
explored_item_idx=explored_item_idx,
)
else:
return self.subsequent_query(
X, explored_item_idx, query_item_idx, mu, pair_selections
)
def first_query(
self, query_item_idx: np.ndarray, explored_item_idx: np.ndarray
) -> Tuple[int, int, np.ndarray, np.ndarray, np.ndarray]:
        opt1_idx, opt2_idx = self.rng.choice(query_item_idx, size=2, replace=False)
for idx in (opt1_idx, opt2_idx):
(
query_item_idx,
explored_item_idx,
) = transfer_id_from_query_to_explored(
idx, query_item_idx, explored_item_idx
)
return (
opt1_idx,
opt2_idx,
explored_item_idx,
query_item_idx,
np.zeros(len(explored_item_idx)),
)
def subsequent_query(
self,
X: np.ndarray,
explored_item_idx: np.ndarray,
query_item_idx: np.ndarray,
mu=None,
pair_selections=None,
) -> Tuple[int, int, np.ndarray, np.ndarray, np.ndarray]:
X_train = X[explored_item_idx]
cov = self.kernel(X_train)
self.loglikelihood.register_data(pair_selections)
mu_map, _ = laplace_approximation(mu, cov, self.loglikelihood)
mu_query, s2_query = gaussian_process_conditional(
X_train, mu_map, X[query_item_idx], self.kernel
)
acquisitions_on_query_set = self.acquisition(
mu_query, s2_query, **{"mu_max": mu_map.max()}
)
opt1_idx = explored_item_idx[pair_selections[-1, 0].item()]
opt2_idx = query_item_idx[np.argmax(acquisitions_on_query_set).item()]
(
query_item_idx,
explored_item_idx,
) = transfer_id_from_query_to_explored(
opt2_idx, query_item_idx, explored_item_idx
)
x = X[opt2_idx].reshape(1, -1)
k_star = self.kernel(X_train, x)
mu_x = k_star.T @ np.linalg.solve(cov, mu_map)
return (
opt1_idx,
opt2_idx,
explored_item_idx,
query_item_idx,
np.append(mu_map, mu_x),
) | APL-Brochu | /APL_Brochu-0.0.1-py3-none-any.whl/apl/apl.py | apl.py |
from typing import Tuple, Any
import numpy as np
from scipy.stats import multivariate_normal
import scipy as sp
ROOT_TWO = np.sqrt(2)
class LogLikelihood:
def __init__(self) -> None:
self.D = None
def __call__(self, f_x: np.ndarray) -> Any:
raise NotImplementedError
def register_data(self, D: np.ndarray) -> None:
self.D = D
def _get_diffs(self, f_x: np.ndarray) -> np.ndarray:
return np.apply_along_axis(
lambda pair: -np.diff(f_x.flatten()[pair]),
arr=self.D,
axis=1,
)
class ProbitDataGivenF(LogLikelihood):
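    """Probit log-likelihood of pairwise comparison data given latent utility values f(x)."""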
def __init__(self, sigma_err: float) -> None:
super().__init__()
self.σ_err_ = sigma_err
def __call__(self, f_x: np.ndarray) -> Any:
return np.sum(
sp.special.log_ndtr(
self._get_diffs(f_x) / (ROOT_TWO * self.σ_err_)
)
)
def laplace_approximation(
μ: np.ndarray, Σ: np.ndarray, loglikelihood: LogLikelihood
) -> Tuple[np.ndarray, np.ndarray]:
"""Approximate the posterior distribution using Laplace's approximation.
The posterior is the simple summation of logpdf of the prior which
is a multivariate normal, and the log likelihood of data given the
prior. Since scipy's optimization module handles minimization problems
the posterior is negated before fed into that function.
Parameters
----------
μ: np.ndarray
The mean of the prior multivariate distribution
Σ: np.ndarray
The covariance matrix of the prior multivariate distribution
loglikelihood : LogLikelihood
A callable instance of the log-likelihood class chosen by the user.
Returns
-------
Tuple[np.ndarray, np.ndarray]
The Maximum A Posteriori values of the mean and covariance.
"""
log_pf = multivariate_normal(mean=μ, cov=Σ).logpdf
neg_posterior = lambda f_x: -(log_pf(f_x) + loglikelihood(f_x))
neg_posterior_opt_res = sp.optimize.minimize(
neg_posterior, μ, method="BFGS"
)
μ_map, Σ_map = neg_posterior_opt_res.x, neg_posterior_opt_res.hess_inv
return μ_map, Σ_map | APL-Brochu | /APL_Brochu-0.0.1-py3-none-any.whl/apl/posterior_approximation.py | posterior_approximation.py |
class A():
    ## Hello light, how are you?
'''# To use:
APL.Reshape_Arabic(Any text, To reverse type #True#)'''
def Reshape_Arabic(text = '', reverse = False):
harakat = "ًٌٍَُِّْ"
list1 = 'ئبتثجحخسشصضطظعغفقكلمنهي'
list2 = 'آأؤإاةدذرزوى'
list3 = 'ء '
v = 0
reshaped_text = ''
        textlist = list(' '+text+' ') # This padding step is necessary for the code to work correctly
        letters_Table = {''' '<initial>' '<medial>' '<final>' '<isolated>' '''
            "ء" : [u"\ufe80", u"\ufe80", u"\ufe80", u"\ufe80"], #ء If the hamza were removed from here, a problem would occur
"آ" : [u"\ufe81", u"\ufe82", u"\ufe82", u"\ufe81"], #آ
"أ" : [u"\ufe83", u"\ufe84", u"\ufe84", u"\ufe83"], #أ
"ؤ" : [u"\ufe85", u"\ufe86", u"\ufe86", u"\ufe85"], #ؤ
"إ" : [u"\ufe87", u"\ufe88", u"\ufe88", u"\ufe87"], #إ
"ئ" : [u"\ufe8b", u"\ufe8c", u"\ufe8a", u"\ufe89"], #ئ
"ا" : [u"\ufe8d", u"\ufe8e", u"\ufe8e", u"\ufe8d"], #ا
"ب" : [u"\ufe91", u"\ufe92", u"\ufe90", u"\ufe8f"], #ب
"ة" : [u"\ufe93", u"\ufe94", u"\ufe94", u"\ufe93"], #ة
"ت" : [u"\ufe97", u"\ufe98", u"\ufe96", u"\ufe95"], #ت
"ث" : [u"\ufe9b", u"\ufe9c", u"\ufe9a", u"\ufe99"], #ث
"ج" : [u"\ufe9f", u"\ufea0", u"\ufe9e", u"\ufe9d"], #ج
"ح" : [u"\ufea3", u"\ufea4", u"\ufea2", u"\ufea1"], #ح
"خ" : [u"\ufea7", u"\ufea8", u"\ufea6", u"\ufea5"], #خ
"د" : [u"\ufea9", u"\ufeaa", u"\ufeaa", u"\ufea9"], #د
"ذ" : [u"\ufeab", u"\ufeac", u"\ufeac", u"\ufeab"], #ذ
"ر" : [u"\ufead", u"\ufeae", u"\ufeae", u"\ufead"], #ر
"ز" : [u"\ufeaf", u"\ufeb0", u"\ufeb0", u"\ufeaf"], #ز
"س" : [u"\ufeb3", u"\ufeb4", u"\ufeb2", u"\ufeb1"], #س
"ش" : [u"\ufeb7", u"\ufeb8", u"\ufeb6", u"\ufeb5"], #ش
"ص" : [u"\ufebb", u"\ufebc", u"\ufeba", u"\ufeb9"], #ص
"ض" : [u"\ufebf", u"\ufec0", u"\ufebe", u"\ufebd"], #ض
"ط" : [u"\ufec3", u"\ufec4", u"\ufec2", u"\ufec1"], #ط
"ظ" : [u"\ufec7", u"\ufec8", u"\ufec6", u"\ufec5"], #ظ
"ع" : [u"\ufecb", u"\ufecc", u"\ufeca", u"\ufec9"], #ع
"غ" : [u"\ufecf", u"\ufed0", u"\ufece", u"\ufecd"], #غ
"ف" : [u"\ufed3", u"\ufed4", u"\ufed2", u"\ufed1"], #ف
"ق" : [u"\ufed7", u"\ufed8", u"\ufed6", u"\ufed5"], #ق
"ك" : [u"\ufedb", u"\ufedc", u"\ufeda", u"\ufed9"], #ك
"ل" : [u"\ufedf", u"\ufee0", u"\ufede", u"\ufedd"], #ل
"م" : [u"\ufee3", u"\ufee4", u"\ufee2", u"\ufee1"], #م
"ن" : [u"\ufee7", u"\ufee8", u"\ufee6", u"\ufee5"], #ن
"ه" : [u"\ufeeb", u"\ufeec", u"\ufeea", u"\ufee9"], #ه
"و" : [u"\ufeed", u"\ufeee", u"\ufeee", u"\ufeed"], #و
"ى" : [u"\ufeef", u"\ufef0", u"\ufef0", u"\ufeef"], #ى
"ي" : [u"\ufef3", u"\ufef4", u"\ufef2", u"\ufef1"], #ي
}
for i in range(1, len(textlist)-1):
            # Determine whether the character is connected to the one before it
aroundbefore = 1
while textlist[i-aroundbefore] in harakat:
aroundbefore += 1
if textlist[i-aroundbefore] in list1:
before = 1
else:
before = 0
            # Determine whether the character is connected to the one after it
aroundafter = 1
while textlist[i+aroundafter] in harakat:
aroundafter += 1
if textlist[i] in list1 and textlist[i+aroundafter] in list1 or textlist[i] in list1 and textlist[i+aroundafter] in list2:
after = 1
else:
after = 0
            if textlist[i] not in letters_Table: # If the character is not in the table
                if textlist[i] == 'ء': # The hamza is handled here because it did not work via the table lookup
new_text = u"\ufe80"
else:
                    new_text = textlist[i] # If not in the table, leave the character as it is
else:
                # If it is in the table, determine its contextual form
                if before == 0 and after == 1: # initial (start of word)
                    new_text = letters_Table[textlist[i]][0]
                if before == 1 and after == 1: # medial (middle of word)
                    new_text = letters_Table[textlist[i]][1]
                if before == 1 and after == 0: # final (end of word)
                    new_text = letters_Table[textlist[i]][2]
                if before == 0 and after == 0: # isolated
                    new_text = letters_Table[textlist[i]][3]
            reshaped_text += str(new_text) # Append the character to the output string
            new_text = '' # Reset the temporary character variable so characters do not accumulate in it
        # Replace the separated lam and alef with the corresponding joined ligature
reshaped_text = reshaped_text.replace('ﻟﺂ', 'ﻵ')
reshaped_text = reshaped_text.replace('ﻠﺂ', 'ﻶ')
reshaped_text = reshaped_text.replace('ﻟﺄ', 'ﻷ')
reshaped_text = reshaped_text.replace('ﻠﺄ', 'ﻸ')
reshaped_text = reshaped_text.replace('ﻟﺈ', 'ﻹ')
reshaped_text = reshaped_text.replace('ﻠﺈ', 'ﻺ')
reshaped_text = reshaped_text.replace('ﻟﺎ', 'ﻻ')
reshaped_text = reshaped_text.replace('ﻠﺎ', 'ﻼ')
        # Reverse the text if requested
if reverse == True:
reshaped_text = reshaped_text[::-1]
return reshaped_text
'''Output is a text'''
##--------------------------------------------------------------------------------------------------------------------------------------##
'''# To use:
APL.Extract(Any text, Before the wanted text, After the wanted text, Wanted text min size, Wanted text max size)'''
def Extract(text, before, after, min = False, max = False):
        # Variables
English_Letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
Symbols = '1234567890-=!"£$%^&*()_+`[];"@\|:~{}<>?./,# ' + "'"
extract, same = False, False
ExText, line = [], ''
text = text.replace(before, u'\uFFFE')
if before != after: text = text.replace(after, u'\uFFFF')
else: same = True
        # Extract the texts
for char in text:
if ((char == u'\uFFFF' and same == False) or (char == u'\uFFFE' and same == True)) and extract == True:
extract = False
                # Check that the extracted text length satisfies the min/max constraints
if min != False:
if len(line) < min:
line = ''
if max != False:
if len(line) > max:
line = ''
if line != '':
ExText.append(line)
line = ''
if extract == True:
                if char in English_Letters or char in Symbols: # Keep only texts whose characters are all in English_Letters or Symbols
line += char
else:
line = ''
extract = False
if char == u'\uFFFE':
extract = True
line = ''
return ExText
'''Output is an array'''
##--------------------------------------------------------------------------------------------------------------------------------------##
'''# To use:
APL.Text_Box_Fitter(Any text, Text zone width, Lines number, New line command, New page command, Characters list: [[char 1, width], [char2, width]...])'''
def Text_Box_Fitter(Input_text, text_zone_width, lines_num, new_line_command, new_page_command, Chars):
#تفقد المعطيات
#---------------------------------------------
Errors = ''
c = False
if text_zone_width != 0:
for mini_list in Chars:
if mini_list[1] > text_zone_width:
if c == False:
Errors += 'Error 00:\n'
c = True
Errors += chr(mini_list[0]) + ' is wider than text_zone.\n'
if text_zone_width == 0:
if c == True: Errors += '\n'
Errors += "Error 01:\ntext_zone_width can't be 0.\n"
if c == False: c = True
if lines_num == 0:
if c == True: Errors += '\n'
Errors += "Error 02:\nlines_num can't be 0.\n"
if c == False: c = True
if c == True:
print(Errors)
exit()
        # The real work starts here
#---------------------------------------------
text_zone_width += 1
lines_num -= 2
text = A.Reshape_Arabic(Input_text, False)
new_text = ''
X, Y = 0, 0
word = ''
text_list = []
for char in text:
if char != ' ': word += char
else:
text_list.append(word)
text_list.append(' ')
word = ''
text_list.append(word)
word = ''
for item in text_list:
item_len = 0
for char in item:
found = False
for mini_list in Chars:
if ord(char) == mini_list[0]:
item_len += mini_list[1]
found = True
break
if found == False:
print('Error 03:\nchar ("' + char + '" , ' + str(ord(char)) + ') not found in table.')
exit()
if X + item_len > text_zone_width and text_zone_width > item_len:
if Y <= lines_num:
new_text += new_line_command
Y += 1
else:
new_text += new_page_command
Y = 0
X = 0
for char in item:
found = False
for mini_list in Chars:
if ord(char) == mini_list[0]:
if text_zone_width - X < mini_list[1]:
if Y <= lines_num:
new_text += new_line_command
Y += 1
else:
new_text += new_page_command
Y = 0
X = 0
X += mini_list[1]
new_text += char
found = True
break
if found == False:
print('Error 03:\nchar ("' + char + '" , ' + str(ord(char)) + ') not found in table.')
exit()
        # Clean up: strip spaces adjacent to break commands and collapse repeated breaks
        cleanup_rules = [
            (new_line_command + ' ', new_line_command),
            (' ' + new_line_command, new_line_command),
            (new_page_command + ' ', new_page_command),
            (' ' + new_page_command, new_page_command),
            (new_line_command + new_line_command, new_line_command),
            (new_page_command + new_page_command, new_page_command),
        ]
        for old, new in cleanup_rules:
            while old in new_text:
                new_text = new_text.replace(old, new)
        # Return the result
#---------------------------------------------
return new_text
'''Output is a text'''
##--------------------------------------------------------------------------------------------------------------------------------------## | APL | /APL-0.0.10.tar.gz/APL-0.0.10/APl/APL.py | APL.py |
APPLPy
A Probability Programming Language -- Python Edition
What is APPLPy?
APPLPy stands for A Probability Programming Language -- Python Edition. The primary goal of APPLPy is to provide an open-source conceptual probability package capable of manipulating random variables symbolically. Although the Python implementation is a recent development, a version based on the Maple computer algebra system has been used for over a decade. The Maple implementation, called APPL, has been successfully integrated into mathematical statistics and computing courses at the College of William and Mary, the United States Military Academy and Colorado College, while also facilitating research in areas ranging from order statistics to queuing theory. The hope of APPLPy is to make the computational capabilities of APPL available to researchers and educators on an open-source platform.
The current capabilities of APPLPy include:
1. Conversion between PDF,CDF,SF,HF,CHF and IDF representations of random variables
2. Computation of expected values, with both numeric and symbolic output
3. Plotting distributions, including piece-wise distributions
4. One-to-one and many-to-one transformations of piecewise distributions
5. Random Variable Algebra (Sums/Differences/Products/Division)
6. Random sampling from distributions
7. Bootstrapping data sets
8. Bayesian inference with ad-hoc prior distributions
9. Computation of distributions for M/M/s queues
10. Analysis of Discrete Time Markov Chains
How is APPLPy Used?
Although APPLPy can be used for a variety of purposes, it is best suited to fill three special roles. First, it enables students to gain an intuitive understanding of mathematical statistics by automating tedious, calculus-intensive algorithms. As such, students can experiment with different models without having to perform difficult derivations or produce ad-hoc code. Second, it allows students to check hand derived results. This aids the learning process by providing a quick and reliable answer key. Finally, it allows researchers to explore systems whose properties would be intractable to derive by hand. As mentioned above, the Maple-based APPL software has already spawned a variety of insightful research. APPLPy has the potential to continue along this pathway. The simplicity of APPLPy's syntax allow users to explore stochastic models with ease.
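For example, a short interactive session might look something like the sketch below. The class and function names are taken from the package's own modules, but exact constructor signatures may vary between releases, so treat this as illustrative rather than definitive:

    from sympy import Symbol
    from applpy import *

    theta = Symbol('theta')
    X = ExponentialRV(theta)    # a random variable with a symbolic parameter
    Mean(X)                     # symbolic expected value
    Variance(X)                 # symbolic variance
    CDF(X)                      # convert the PDF representation to a CDF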
INSTALLATION:
ApplPy requires the following dependencies in order to run properly:
1. SymPy
2. Matplotlib
The latest stable releases of both of these packages can be downloaded
from the Python Package Index at https://pypi.python.org/pypi
The latest working edition of APPLPy is available on GitHub and the latest
stable release is available from the Python Package Index. To install the
software, open the directory where APPLPy has been downloaded and type
the following command
$ python setup.py install
If you have any comments or suggestions for APPLPy, feel free to contact the author at [email protected]. Users with Python experience are encouraged to get in touch and contribute.
| APPLPy | /APPLPy-0.4.12.tar.gz/APPLPy-0.4.12/README.txt | README.txt |
from __future__ import division
from sympy import (Symbol, symbols, oo, integrate, summation, diff,
exp, pi, sqrt, factorial, ln, floor, simplify,
solve, nan, Add, Mul, Integer, function,
binomial, pprint, log)
from .rv import (RV, RVError, CDF, PDF, BootstrapRV,
ExpectedValue,Mean, Variance, Truncate)
x,y,z,t=symbols('x y z t')
"""
A Probability Progamming Language (APPL) -- Python Edition
Copyright (C) 2001,2002,2008,2010,2014 Andrew Glen, Larry
Leemis, Diane Evans, Matthew Robinson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
def BayesMenu():
print 'ApplPy Procedures'
print ""
print 'Procedure Notation'
print ""
print 'X is a likelihood function'
print 'Y is a prior distribution'
print 'x is an observed data point'
print 'Data is an observed set of data'
print 'entered as a list --> ex. Data=[1,12.4,34,.52.45,64]'
print 'low and high are numeric'
print ""
print ""
print 'Bayesian Statistics Procedures'
print 'Posterior(X,Y,x,param), BayesUpdate(X,Y,Data,param)'
print 'PosteriorPreidictive(X,Y,Data,param), TwoSample(X,Y,Data1,Data2)'
print 'CS(m,s,alpha,n,type),Jeffreys(X,low,high,param)'
print ""
def Posterior(LikeRV,PriorRV,data=[],param=Symbol('theta')):
"""
    Procedure Name: Posterior
    Purpose: Derive a posterior distribution for a parameter
                given a likelihood function, a prior distribution and
                a list of data observations
    Arguments:  1. LikeRV: The likelihood function (a random variable)
                2. PriorRV: A prior distribution (a random variable)
                3. data: a list of data observations
                4. param: the unknown parameter in the likelihood function
(a sympy symbol)
Output: 1. PostRV: A posterior distribution
"""
# If the unknown parameter is not a symbol, return an error
if type(param)!=Symbol:
raise RVError('the unknown parameter must be a symbol')
if PriorRV.ftype[0]=='continuous':
# Extract the likelihood function from the likelhood random
# variable
likelihood=LikeRV.func[0].subs(x,data[0])
for i in range(1,len(data)):
likelihood*=LikeRV.func[0].subs(x,data[i])
likelihood=simplify(likelihood)
likelihood=likelihood.subs(param,x)
# Create a list of proportional posterior distributions
FunctionList=[]
for i in range(len(PriorRV.func)):
# extract the prior distribution
prior=PriorRV.func[i]
# multiply by the likelihood function
proppost=likelihood*prior
# substitute the data observation
proppost=simplify(proppost)
# add to the function list
FunctionList.append(proppost)
if len(FunctionList) == 1:
c = integrate(FunctionList[0],
(x,PriorRV.support[0],PriorRV.support[1]))
func = (1/c)*FunctionList[0]
PostRV = RV(func,
PriorRV.support,['continuous','pdf'])
else:
PropPost=RV(FunctionList,
PriorRV.support,['continuous','pdf'])
# Normalize the posterior distribution
PostRV=Truncate(PropPost,
[PriorRV.support[0],PriorRV.support[-1]])
return PostRV
# If the prior distribution is discrete and the likelihood function
# is continuous, compute the posterior distribution
    if PriorRV.ftype[0]=='discrete' and LikeRV.ftype[0]=='continuous':
# Compute a distribution that is proportional to the posterior
# distribution
List1=[]
for i in range(len(PriorRV.support)):
likelihood=LikeRV.func[0]
likelihood=likelihood.subs(x,data)
# Substitute each point that appears in the support of
# the prior distribution into the likelihood distribution
subslike=likelihood.subs(param,PriorRV.support[i])
prior=PriorRV.func[i]
# Multiply the prior distribution by the likelihood function
            priorXlike=simplify(prior*subslike)
List1.append(priorXlike)
# Find the marginal distribution
marginal=sum(List1)
# Find the posterior distribution by dividing each value
# in PriorXLike by the marginal distribution
List2=[]
for i in range(len(List1)):
List2.append(List1[i]/marginal)
PostRV=RV(List2,PriorRV.support,PriorRV.ftype)
return PostRV
# If the prior distribution and the likelihood function are both
# discrete, compute the posterior distribution
if PriorRV.ftype[0]=='discrete' and LikeRV.ftype[0]=='discrete':
# If the prior distribution and the likelihood function do not
# have the same sizes, return and error
if len(PriorRV.func)!=len(LikeRV.func):
string='the number of values in the prior distribution and'
string+='likelihood function must be the same'
raise RVError(string)
# Multiply the prior distribution by the likelihood function
priorXlike=[]
for i in range(len(PriorRV.func)):
val=PriorRV.func[i]*LikeRV.func[i]
priorXlike.append(val)
# Compute the marginal distribution to normalize the posterior
k=sum(priorXlike)
# Compute the posterior distribution
posteriorlist=[]
for i in range(len(priorXlike)):
            val=priorXlike[i]/k
posteriorlist.append(val)
PostRV=RV(posteriorlist,PriorRV.support,PriorRV.ftype)
return PostRV
def CredibleSet(PostRV,alpha):
"""
Procedure Name: CredibleSet
Purpose: Produce a credible set given a likelihood function
and a confidence level
Arguments: 1. PostRV: The distribution of the parameter
2. alpha: the confidence level
Output: 1. CredSet: a credible set in the form of a list
"""
# If alpha is not between 0 and 1, return an error
if alpha<0 or alpha>1:
raise RVError('alpha must be between 0 and 1')
    # Compute the lower bound of the credible set
    lower=PostRV.variate(n=1,s=alpha/2)[0]
    # Compute the upper bound of the credible set
    upper=PostRV.variate(n=1,s=1-(alpha/2))[0]
CredSet=[lower,upper]
return CredSet
def JeffreysPrior(LikeRV,low,high,param):
"""
Procedure Name: JeffreysPrior
Purpose: Derive a Jeffreys Prior for a likelihood function
Arguments: 1. LikeRV: The likelihood function (a random variable)
2. low: the lower support
3. high: the upper support
4. param: the unknown parameter
Output: 1. JeffRV: the Jeffreys Prior distribution
"""
# If the likelihood function is continuous, compute the Jeffreys
# Prior
if LikeRV.ftype[0]=='continuous':
likelihood=LikeRV.func[0]
loglike=ln(likelihood)
logdiff=diff(loglike,param)
jefffunc=sqrt(integrate(likelihood*logdiff**2,
(x,LikeRV.support[0],
LikeRV.support[1])))
jefffunc=simplify(jefffunc)
jefffunc=jefffunc.subs(param,x)
JeffRV=RV([jefffunc],[low,high],LikeRV.ftype)
return JeffRV
# Old BayesUpdate code ... the Posterior procedure now computes
# the posterior distribution with only one integration. New
# code runs much faster
def BayesUpdate(LikeRV,PriorRV,data=[],param=Symbol('theta')):
"""
    Procedure Name: BayesUpdate
Purpose: Derive a posterior distribution for a parameter
given a likelihood function, a prior distribution and
a data set
Arguments: 1. LikeRV: The likelihood function (a random variable)
2. PriorRV: A prior distribution (a random variable)
3. data: a data set
                4. param: the unknown parameter in the likelihood function
(a sympy symbol)
Output: 1. PostRV: A posterior distribution
"""
# Find the posterior distribution for the first observation
    PostRV=Posterior(LikeRV,PriorRV,[data[0]],param)
# If there are multiple observations, continue bayesian updating
# for each observation in the data set
if len(data)>1:
for i in range(1,len(data)):
# Set the previous posterior distribution as the new
# prior distribution
NewPrior=PostRV
# Compute the new posterior distribution for the next
# observation in the data set
            PostRV=Posterior(LikeRV,NewPrior,[data[i]],param)
return PostRV
def PosteriorPredictive(LikeRV,PriorRV,data=[],param=Symbol('theta')):
"""
Procedure Name: PosteriorPredictive
Purpose: Derive a posterior predictive distribution to predict the next
observation, given a likelihood function, a prior
distribution and a data vector
Arguments: 1. LikeRV: The likelihood function (a random variable)
2. PriorRV: A prior distribution (a random variable)
3. data: a data set
                4. param: the unknown parameter in the likelihood function
(a sympy symbol)
Output: 1. PostPredRV: A posterior predictive distribution
"""
# If the prior distribution is continuous, compute the posterior
# predictive distribution
if PriorRV.ftype[0]=='continuous':
# Compute the posterior distribution
PostRV=Posterior(LikeRV,PriorRV,data,param)
posteriorfunc=PostRV.func[0].subs(x,param)
likelihoodfunc=LikeRV.func[0]
postXlike=posteriorfunc*likelihoodfunc
postpredict=integrate(postXlike,
(param,PriorRV.support[0],
PriorRV.support[1]))
postpredict=simplify(postpredict)
PostPredRV=RV([postpredict],LikeRV.support,LikeRV.ftype)
return PostPredRV | APPLPy | /APPLPy-0.4.12.tar.gz/APPLPy-0.4.12/applpy/bayes.py | bayes.py |
from __future__ import division
from sympy import (Symbol, symbols, oo, integrate, summation, diff,
exp, pi, sqrt, factorial, ln, floor, simplify,
solve, nan, Add, Mul, Integer, function,
binomial, pprint,log,expand,zoo,latex,Piecewise)
from sympy.plotting.plot import plot
from random import random
import numpy as np
import pylab as pyplt
from .rv import RV, RVError
x,y,z,t=symbols('x y z t')
"""
A Probability Progamming Language (APPL) -- Python Edition
Copyright (C) 2001,2002,2008,2010,2014 Andrew Glen, Larry
Leemis, Diane Evans, Matthew Robinson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
class BivariateRV:
"""
BivariateRV Class:
Defines the data structure for bivariate random variables
Defines special procedures for bivariate random variables
"""
def __init__(self,func,constraints,ftype=['continuous','pdf']):
"""
Procedure Name: __init__
Purpose: Creates an instance of the bivariate random variable class
Arguments:
self.func: a list of the functions f(x,y) for the random variable
self.constraints: a list of constraints for the random variable.
The list of constraints must satisfy the following conditions:
1. The constraints must be entered in adjacent order;
clockwise or counterclockwise is acceptable
2. The constraints must completely enclose a region
3. The constraints must be entered as strictly inequalities
in the form 0<f(x,y). For instance, x**2<sqrt(y) would
be entered as sqrt(y)-x**2.
4. Except for constraints inthe form x<a or x>a, each
constraint must pass the vertical line test. i.e. There
should only be one y value associated with each x value.
self.ftype: a list of two strings. The first indicates whether the
random varable is discrete or continuous. The second specifies
the form for the represenation of the random variable (pdf,cdf,
etc)
Output: 1. An instance of the bivariate random variable class
"""
# If the function argument is not given in the form to a list, change
# it into list format
if isinstance(func,list)!=True:
func1=func
func=[func1]
# Check to make sure that the constraints are given in the form of
# a list
if isinstance(constraints,list)!=True:
raise RVError('Constraints must be entered as a list')
# Make sure that the random variable is either discrete or continuous
if ftype[0] not in ['continuous','discrete','Discrete']:
err_string='Random variables must be discrete or continuous'
raise RVError(err_string)
# Check to make sure the constraint list has the correct length
# The list of constraints should have the same number of elements
# as the list of functions
        if len(constraints)-len(func)!=0:
            err_string='a bivariate random variable must have one set of'
            err_string+=' constraints for each function that is entered'
            raise RVError(err_string)
# Initialize the random variable
self.func=func
self.constraints=constraints
self.ftype=ftype
self.cache=None
"""
Special Class Methods:
1. __repr__(self)
2. __len__(self)
"""
def __repr__(self):
"""
Procedure Name: __repr__
Purpose: Sets the default string display setting for the bivariate
random variable class
Arguments: 1. self: the random variable
Output: 1. A series of print statements describing each
segment of the random variable
"""
return repr(self.display(opt='repr'))
def __len__(self):
"""
Procedure Name: __len__
Purpose: Sets the behavior for the len() procedure when an instance
of the random variable class is given as input. This
procedure will return the number of pieces if the
distribution is piecewise.
Arguments: 1. self: the random variable
Output: 1. the number of segments in the random variable
"""
return len(self.func)
"""
Utility Methods
Procedures:
1. add_to_cache(self,object_name,object)
2. display(self)
3. init_cache(self)
4. verifyPDF(self)
"""
def add_to_cache(self,object_name,obj):
"""
Procedure Name: add_to_cache
Purpose: Stores properties of the random variable (i.e. mean, variance,
cdf, sf) in memory. The next time a function is called to
compute that property, APPLPy will retrieve the object
from memory.
Arguments: 1. self: the random variable
2. object_name: the key for the object in the cache
dictionary
3. obj: the object to be stored in memory.
Output: 1. No output. The self.cache property of the random
variable is modified to include the specified
object.
"""
# If a cache for the random variable does not exist, initialize it
if self.cache==None:
self.init_cache()
# Add an object to the cache dictionary
self.cache[object_name]=obj
def display(self,opt='repr'):
"""
Procedure Name: display
Purpose: Displays the random variable in an interactive environment
Arugments: 1. self: the random variable
Output: 1. A print statement for each piece of the distribution
indicating the function and the relevant support
"""
if self.ftype[0] in ['continuous','Discrete']:
print ('%s %s'%(self.ftype[0],self.ftype[1]))
for i in range(len(self.func)):
cons_list=['0<'+str(cons) for cons in self.constraints[i]]
cons_string=', '.join(cons_list)
print('for x,y enclosed in the region:')
print(cons_string)
print('---------------------------')
pprint(self.func[i])
print('---------------------------')
if i<len(self.func)-1:
print(' ');print(' ')
if self.ftype[0]=='discrete':
print '%s %s where {x->f(x)}:'%(self.ftype[0],
self.ftype[1])
for i in range(len(self.support)):
if i!=(len(self.support)-1):
print '{%s -> %s}, '%(self.support[i],
self.func[i]),
else:
print '{%s -> %s}'%(self.support[i],
self.func[i])
def init_cache(self):
"""
Procedure Name: init_cache
Purpose: Initializes the cache for the random variable
Arguments: 1. self: the random variable
Output: 1. The cache attribute for the random variable
is initialized
"""
self.cache={}
def verifyPDF(self):
"""
Procedure Name: verifyPDF
Purpose: Verifies where or not the random variable is valid. It first
checks to make sure the pdf of the random variable
integrates to one. It then checks to make sure the random
variable is strictly positive
Arguments: 1. self: the random variable
Output: 1. print statement that displays that volume under the
random variable and a second statement that
shows whether or not the random variable is valid
"""
# Check to make sure that the random variable is entered as a
# continuous pdf
        if self.ftype!=['continuous','pdf']:
err_string='verifyPDF currently only supports continuous pdfs'
raise RVError(err_string)
totalPDF=0
absPDF=0
# i loops through the number of segments of XY
for i in range(len(self.func)):
x='x';y='y'
ncons=len(self.constraints[i])
# list of x intercepts and corresponding y intercepts
xinters=[]
yinters=[]
# corresponding lines 1 and 2
line1=[]
line2=[]
# j loops through the constraints for segment i
for j in range(ncons):
cons_j=self.constraints[i][j]
cons_mod=self.constraints[i][(j+1)%ncons]
# Use solve to compute the intersect point for each of the
# adjacent constraints. cons_j is the jth constraint
# and cons_mod uses modular division to find the adjacent
# constraint (moves to 0 after last adjacent segment).
# Intercepts are created first as a list to all the
# algorithm to detect multiple intercepts
temp=solve([cons_j,cons_mod],x,y,dict=True)
if cons_j==oo and cons_mod==0:
if cons_j==x:
temp=[{x:oo,y:0}]
else:
temp=[{x:0,y:oo}]
if cons_j==-oo and cons_mod==0:
if cons_j==x:
temp=[{x:-oo,y:0}]
else:
temp=[{x:0,y:-oo}]
if cons_j==0 and cons_mod==oo:
if cons_j==x:
temp=[{y:oo,x:0}]
else:
temp=[{y:0,x:oo}]
if cons_j==0 and cons_mod==-oo:
if cons_j==x:
temp=[{y:-oo,x:0}]
else:
temp=[{y:0,x:-oo}]
if len(temp)>1:
err_string='Adjacent constraints intersect at '
err_string='two or more points'
raise RVError(err_string)
elif len(temp)==0:
err_string='Adjacent constraints do not intersect'
raise RVError(err_string)
if len(temp)!=0:
line1.append(cons_j)
line2.append(cons_mod)
if len(temp)!=0:
xinters.append(temp[0][x])
yinters.append(temp[0][y])
if len(xinters)==ncons+1:
print('Unbounded')
# Bubble sort all four lists with respect to xinters
for ib in range(len(xinters)-1):
for jb in range(ib+1,len(xinters)):
if xinters[ib]>xinters[jb]:
# Swap relevant indices in for the intercepts and lines
tempb=xinters[ib]
xinters[ib]=xinters[jb]
xinters[jb]=tempb
tempb=yinters[ib]
yinters[ib]=yinters[jb]
yinters[jb]=tempb
tempb=line1[ib]
line1[ib]=line1[jb]
line1[jb]=tempb
tempb=line2[ib]
line2[ib]=line2[jb]
line2[jb]=tempb
# Default yupper and ylower assuming the start off is a vert line
set1=_union(line1[0],line2[0])
set2=_union(line1[1],line2[1])
            yupper=_intersection(set1,set2)
            ylower=_intersection(set1,set2)
start=1
# Start off from a point (figure out yupper and ylower)
if xinters[0]!=yinters[1]:
eqn=solve(line1[0],y)-solve(line2[0],y)
area=integrate(eqn,(x,xinters[0],xinters[1]))
# PDF evaluated over the segment
if area>0:
yupper=line1[0]
ylower=line2[0]
else:
ylower=line1[0]
yupper=line2[0]
y_0=solve(ylower,y)
y_1=solve(yupper,y)
x_0=xinters[0]
x_1=xinters[1]
totalPDF+=integrate(integrate(XY.func[i],
(y,y_0,y_1)),
(x,x_0,x_1))
# Not yet supported by sympy
#absPDF+=integrate(integrate(abs(XY.func[i]),
# (y,y_0,y_1)),
# (x,x_0,x_1))
start=2
# Triangle case (left = point, right = line)
if start==2 and len(xinters)==3 and xinters[2]==xinters[1]:
start=4
# Begin calculating PDFs
ind=start-1
while ind<len(xinters):
# left xinters lie on a vertical line
if xinters[ind]==xinters[ind+1] and ind!=len(xinters):
# y ind < ind+1 => ylower is the other line intersecting
# ind and yupper is otherline intersecting ind+1
if yinters[ind]<yinters[ind+1]:
ylower=_union(line1[ind],line2[ind]).remove(ylower)
yupper=_union(line1[ind+1],line2[ind+1]).remove(ylower)
else:
yupper=_union(line1[ind],line2[ind]).remove(ylower)
ylower=_union(line1[ind+1],line2[ind+1]).remove(ylower)
ylower=ylower[0]
yupper=yupper[0]
# Infinity case
lowerB=solve(ylower,y)
upperB=solve(yupper,y)
if yupper==oo:
upperB=oo
if ylower==-oo:
lowerB=-oo
totalPDF+=integrate(integrate(XY.func[i],
(y,lowerB,upperB)),
(x,xinters[ind],xinters[ind+1]))
# Not yet supported by sympy
#absPDF+=integrate(integrate(abs(XY.func[i]),
# (y,lowerB,upperB)),
# (x,xinters[ind],xinters[ind+1]))
ind+=1
# Left x is only one point
'''
Supporting Functions:
1. _intersection(a,b)
2. _union(a,b)
'''
def _intersection(a,b):
"""
Procedure Name: _intersection
Purpose: Returns the intersection of two lists
Arguments: 1. a: a list
2. b: a list
Output: 1. A list containing the intersection of a and b
"""
if type(a) != list:
a=[a]
if type(b) != list:
b=[b]
return list(set(a) & set(b))
def _union(a,b):
"""
Procedure Name: _union
Purpose: Returns the union of two lists
Arguments: 1. a: a list
2. b: a list
Output: 1. A list containing the union of a and b
"""
if type(a) != list:
a=[a]
if type(b) != list:
b=[b]
return list(set(a) | set(b)) | APPLPy | /APPLPy-0.4.12.tar.gz/APPLPy-0.4.12/applpy/bivariate.py | bivariate.py |
from __future__ import division
from sympy import (Symbol, symbols, oo, integrate, summation, diff,
exp, pi, sqrt, factorial, ln, floor, simplify,
solve, nan,Add, Mul, Integer, function,
binomial, pprint, nsolve,log)
from random import random
from .rv import (RV, RVError, CDF, PDF, BootstrapRV,
ExpectedValue,Mean,Variance)
x,y,z,t=symbols('x y z t')
"""
A Probability Progamming Language (APPL) -- Python Edition
Copyright (C) 2001,2002,2008,2010,2014 Andrew Glen, Larry
Leemis, Diane Evans, Matthew Robinson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
def KSTest(RVar,data):
"""
Procedure Name: KSTest
Purpose: Calculates the Kolmogorov-Smirnoff test statistic
for the empirical CDF of the sample data versus
the CDF of a fitted distribution with random
variable X
Arguments: 1. RVar: A random variable model
2. data: A data sample in list format
Output: 1. The Kolmogorov-Smirnoff test statistics
"""
# Create an empirical CDF from the data sample
EmpCDF=CDF(BootstrapRV(data))
m=len(EmpCDF.support)
# Compute fitted CDF values
FX=CDF(RVar)
FittedCDFValue=[]
for i in EmpCDF.support:
FittedCDFValue.append(CDF(FX,i).evalf())
# Compute the KS test statistic
KS=0
for i in range(m-1):
Dpos=abs(EmpCDF.func[i+1]-FittedCDFValue[i]).evalf()
Dneg=abs(FittedCDFValue[i]-EmpCDF.func[i]).evalf()
KS=max(max(KS,Dpos),Dneg)
KS=max(KS,abs(FittedCDFValue[m-1]).evalf())
return KS
def MOM(RVar,data,parameters,guess=None,numeric=False):
"""
Procedure Name: MLE
Purpose: Estimates parameters using the method of moments
Arguments: 1. RVar: A random variable model
2. data: The data sample
3. parameters: The list of parameters to estimate
4. guess: An initial guess for the unknown parameters,
required if numerical methods are being used
5. numeric: A binary variable. If True, MOM will attempt
to solve for unknown parameters using numerical
methods
Output: 1. The estimates in dictionary form
"""
# Convert the random variable to pdf form
fx=PDF(RVar)
    # Create a bootstrap random variable from the sample
xstar=BootstrapRV(data)
# Create a list of equations to solve
soln_eqn=[]
for i in range(len(parameters)):
val=ExpectedValue(xstar,x**(i+1))
expect=ExpectedValue(fx,x**(i+1))
soln_eqn.append(val-expect)
# Create a list of solutions
if numeric==False:
try:
soln=solve(soln_eqn,set(parameters))
except:
err_string='MOM failed to solve for the parameters,'
err_string+=' please try numerical MOM'
raise RVError(err_string)
elif numeric==True:
if guess==None:
err_string='an initial guess must be entered to'
            err_string+=' solve MOM numerically'
raise RVError(err_string)
soln_tup=tuple(soln_eqn)
param_tup=tuple(parameters)
guess_tup=tuple(guess)
soln=nsolve(soln_tup,param_tup,guess_tup)
return soln
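# Example (illustrative sketch, not part of the original documentation):
# method of moments for an exponential model with unknown rate theta, run in
# this module's namespace (RV, Symbol, exp and oo are imported above). The
# data values are made up; the symbolic solve may be slow for other models.
#
#   >>> theta = Symbol('theta', positive=True)
#   >>> X = RV([theta*exp(-theta*x)], [0, oo])
#   >>> data = [1.2, 0.7, 3.4, 2.1]
#   >>> MOM(X, data, [theta])    # matches E[X] = 1/theta to the sample mean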
def MLE(RVar,data,parameters,guess=None,numeric=False,censor=None):
"""
Procedure Name: MLE
Purpose: Estimates parameters using maximum likelihood estimation
Arguments: 1. RVar: A random variable model
2. data: The data sample
3. parameters: The parameters to be estimated
4. censor: A binary list of 0's and 1's where 1
indicates an observed value and 0 indicates
a right censored value
5. guess: An initial guess for the unknown parameters,
required if numerical methods are being used
6. numeric: A binary variable. If True, MLE will attempt
to solve for unknown parameters using numerical
methods
Output: 1. A list of parameter estimates
"""
    # Return an error message if the distribution is piecewise
if len(RVar.func)!=1:
raise RVError('MLE does not accept piecewise models')
# If the random variable has a hard-coded MLE procedure, use
# the corresponding procedure
if RVar.__class__.__name__=='NormalRV':
if censor==None:
if len(parameters)==2:
return MLENormal(data)
if len(parameters)==1:
string='MLE is estimating mu and sigma parameters'
string+=' for the Normal distribution'
print(string)
return MLENormal(data)
if RVar.__class__.__name__=='ExponentialRV':
return MLEExponential(data)
if RVar.__class__.__name__=='WeibullRV':
return MLEWeibull(data,censor)
if RVar.__class__.__name__=='PoissonRV':
return MLEPoisson(data)
# Convert the random variable to its PDF form
fx=PDF(RVar)
if censor==None:
LogLike=0
for i in range(len(data)):
func=ln(fx.func[0])
LogLike+=func.subs(x,data[i])
# Otherwise, use the given value as a censor
elif censor!=None:
# Check to make sure the list contains only 1's and
# 0's
for i in range(len(censor)):
if censor[i] not in [0,1]:
return RVError('Censor may contain only 1s and 0s')
# Check to make sure the censor list is the same
# length as the data list
if len(censor)!=len(data):
return RVError('Data and censor must be the same length')
hx=HF(RVar)
chx=CHF(RVar)
# Split up the sample data into two lists, censored
# and uncensored
censored=[]
uncensored=[]
for i in range(len(data)):
if censor[i]==1:
uncensored.append(data[i])
elif censor[i]==0:
censored.append(data[i])
# Compute and simplify the log-likelihood function
Logh=0
Sumch=0
for i in range(len(uncensored)):
func=ln(hx.func[0])
Logh+=func.subs(x,uncensored[i])
for i in range(len(data)):
func=ln(chx.func[0])
Sumch+=func.subs(x,data[i])
LogLike=simplify(Logh-Sumch)
# Differentiate the log likelihood function with respect to
# each parameter and equate to 0
DiffLogLike=[]
for i in range(len(parameters)):
func=diff(LogLike,parameters[i])
DiffLogLike.append(simplify(func))
# Solve for each parameter
if numeric==False:
try:
soln=solve(DiffLogLike,set(parameters))
except:
err_string='MLE failed to solve for the parameters, '
err_string+='please try the numeric MLE method'
raise RVError(err_string)
elif numeric==True:
if guess==None:
err_string='an initial guess must be entered to'
err_string+=' solve MLE numerically'
raise RVError(err_string)
diff_tup=tuple(DiffLogLike)
param_tup=tuple(parameters)
guess_tup=tuple(guess)
soln=nsolve(diff_tup,param_tup,guess_tup)
return soln
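# Example (illustrative sketch, not part of the original documentation):
# symbolic maximum likelihood estimation for an exponential model with
# unknown rate theta, using the generic (non hard-coded) path above. The
# data values are made up for illustration.
#
#   >>> theta = Symbol('theta', positive=True)
#   >>> X = RV([theta*exp(-theta*x)], [0, oo])
#   >>> data = [1.2, 0.7, 3.4, 2.1]
#   >>> MLE(X, data, [theta])    # solves d/dtheta log-likelihood = 0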
def MLEExponential(data):
"""
Procedure Name: MLEExponential
    Purpose: Conduct maximum likelihood estimation on an
exponential distribution
Input: 1. data: a data set
Output: 1. soln: an estimation for the unknown parameter
"""
Xstar=BootstrapRV(data)
theta=1/Mean(Xstar)
soln=[theta]
return soln
def MLENormal(data,mu=None,sigma=None):
"""
Procedure Name: MLENormal
Purpose: Conduct maximum likelihood estimation on a normal
distribution with at least one unknown parameter
Input: 1. data: a data set
2. mu: an optional parameter that holds mu constant. If a
value is entered, MLENormal will estimate sigma given
a fixed mu
3. sigma: an optional parameter that holds sigma constant
Output: 1. soln: a list of estimates for the unknown parameters
in the form [mu,sigma]
"""
Xstar=BootstrapRV(data)
if mu==None:
mu=Mean(Xstar)
if sigma==None:
sigma=sqrt(Variance(Xstar))
soln=[mu,sigma]
return soln
def MLEPoisson(data):
"""
Procedure Name: MLEPoisson
Purpose: Conduct maximum likelihood estimation for the Poisson
distribution
Input: 1. data: a data set
Output: 1. soln: a list of estimates for the unknown parameter
in the form [theta]
"""
Xstar=BootstrapRV(data)
meanX=Mean(Xstar)
soln=[meanX]
return soln
def MLEWeibull(data,censor=None):
"""
Procedure Name: MLEWeibull
Purpose: Conduct maximum likelihood estimation for the Weibull
distribution with arbitrary right censor
Input: 1. data: a data set
            2. censor: an indicator list where 1 is an observed data
point and 0 is an unobserved data point
Output: 1. soln: a list of estimates for the unknown parameters
in the form [theta,kappa]
"""
# If a list of right censored values is not provided, set
# the right censor list to contain all 1's, indicating
# that every value was observed
n=len(data)
if censor!=None:
Delta=censor
else:
Delta=[1 for obs in data]
# Set tolerance and initial estimate
epsilon=0.000000001
c=1
# Compute the number of observed failures
r=sum(Delta)
# Calculate s1
s1=0
for i in range(n):
if Delta[i]==1:
s1+=log(data[i])
# Calculate s2 (beginning of random censoring adjustment)
s2=0
for i in range(n):
s2+=data[i]**c
# Calculate s3
s3=0
for i in range(n):
s3+=data[i]**c*log(data[i])
while r*s3-s1*s2<=0:
c=c*1.1
s2=0
for i in range(n):
s2+=data[i]**c
s3=0
for i in range(n):
s3+=data[i]**c*log(data[i])
# Calculate s2 (beginning of first iteration of while loop)
s2=0
for i in range(n):
s2+=data[i]**c
# Calculate s3
s3=0
for i in range(n):
s3+=data[i]**c*log(data[i])
# Calculate q and c
q=r*s2/(r*s3-s1*s2)
c=(c+q)/2
counter=0
while abs(c-q)>epsilon and counter<100:
counter+=1
s2=0
for i in range(n):
s2+=data[i]**c
s3=0
for i in range(n):
s3+=data[i]**c*log(data[i])
q=r*s2/(r*s3-s1*s2)
c=(c+q)/2
# Calculate the MLEs
chat=c
s2=0
for i in range(n):
s2+=data[i]**c
bhat=(s2/r)**(1/c)
soln=[1/bhat,chat]
return soln | APPLPy | /APPLPy-0.4.12.tar.gz/APPLPy-0.4.12/applpy/stats.py | stats.py |
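# Example (illustrative sketch, not part of the original documentation):
# MLEWeibull works directly on a data list and an optional right-censoring
# indicator list; both lists below are made up for illustration.
#
#   >>> data   = [2.1, 3.7, 1.5, 4.2, 2.8]
#   >>> censor = [1, 1, 0, 1, 1]       # the third observation is censored
#   >>> MLEWeibull(data, censor)       # returns [theta_hat, kappa_hat]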
from __future__ import division
from sympy import (Symbol, symbols, oo, integrate, summation, diff,
exp, pi, sqrt, factorial, ln, floor, simplify,
solve, nan, Add, Mul, Integer, function,
binomial, pprint,log,expand,zoo,latex,Piecewise,Rational,
Sum,S,Float,limit)
from sympy.plotting.plot import plot
from random import random
import numpy as np
import pickle
try:
import seaborn
except:
pass
import matplotlib.pylab as plt
x,y,z,t=symbols('x y z t')
"""
A Probability Programming Language (APPL) -- Python Edition
Copyright (C) 2001,2002,2008,2010,2014 Andrew Glen, Larry
Leemis, Diane Evans, Matthew Robinson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
class RVError(Exception):
"""
RVError Class
Defines a custom error message for exceptions relating
to the random variable class
"""
def __init__(self,value):
self.value=value
def __str__(self):
return repr(self.value)
class RV:
"""
RV Class
Defines the data structure of ApplPy random variables
Defines procedures relating to ApplPy random variables
"""
def __init__(self,func,support,ftype=['continuous','pdf']):
"""
Creates an instance of the random variable class
The random variable default is to produce a continuous pdf
Checks the random variable for errors
"""
# Check for errors in the data structure of the random
# variable
# Check to make sure that the given function is in the
# form of a list
# If it is not in the form of a list, place it in a list
if isinstance(func,list)!=True:
func1=func
func=[func1]
# Check to make sure that the given support is in the form of
# a list
if isinstance(support,list)!=True:
raise RVError('Support must be a list')
# Check to make sure that the random variable is either
# discrete or continuous
if ftype[0] not in ['continuous','discrete','Discrete']:
string='Random variables must either be discrete'
string+=' or continuous'
raise RVError(string)
# Check to make sure that the support list has the correct
# length
# The support list should be one element larger than the
# function list for continuous distributions, and the same
# size for discrete
if ftype[0] in ['continuous','Discrete']:
if len(support)-len(func)!=1:
string='Support has incorrect number of elements'
raise RVError(string)
if ftype[0]=='discrete':
if len(support)-len(func)!=0:
string='Support has incorrect number of elements'
raise RVError(string)
# Check to make sure that the elements of the support list are
# in ascending order
for i in range(len(support)-1):
# Only compare if the supports are numbers
if type(support[i]) in [int,float]:
if type(support[i+1]) in [int,float]:
if support[i]>support[i+1]:
raise RVError('Support is not in ascending order')
# Initialize the random variable
self.func = func
self.support = support
self.ftype = ftype
self.cache = None
self.filename = None
"""
Special Class Methods
Procedures:
1. __repr__(self)
2. __len__(self)
3. __pos__(self)
4. __neg__(self)
5. __abs__(self)
6. __add__(self,other)
7. __radd__(self,other)
8. __sub__(self,other)
9. __rsub__(self,other)
10. __mul__(self,other)
11. __rmul__(self,other)
12. __truediv__(self,other)
13. __rtruediv__(self,other)
14. __pow__(self,n)
15. __eq__(self,other)
"""
def __repr__(self):
"""
Procedure Name: __repr__
Purpose: Sets the default string display setting for the random
variable class
Arguments: 1. self: the random variable
Output: 1. A series of print statements describing
each segment of the random variable
"""
return repr(self.display(opt='repr'))
def __len__(self):
"""
Procedure Name: __len__
Purpose: Sets the behavior for the len() procedure when an instance
of the random variable class is given as input. This
procedure will return the number of pieces if the
distribution is piecewise.
Arguments: 1. self: the random variable
Output: 1. the number of segments in the random variable
"""
return len(self.func)
# The following procedures set the behavior for the +,-,* and / operators,
# as well as the behavior for negation and absolute value. If the
# operators are used with two random variables are used, APPLPy calls
# the product or convolution commands. If the operators are used
# with a random variable and a constant, the random variable can be
# shifted or scaled.
def __pos__(self):
"""
Procedure Name: __pos__
Purpose: Implements the behavior for the positive operator
Arguments: 1. self: the random variable
Output: 1. The same random variable
"""
return(self)
def __neg__(self):
"""
Procedure Name: __neg__
Purpose: Implements the behavior for negation
Arguments: 1. self: the random variable
Output: 1. The negative transformation of the random variable
"""
gX=[[-x],[-oo,oo]]
neg=Transform(self,gX)
return(neg)
def __abs__(self):
"""
Procedure Name: __abs__
Purpose: Implements the behavior of random variables passed to the
abs() function
Arguments: 1. self: the random variable
Output: 1. The absolute value of the random variable
"""
gX=[[abs(x)],[-oo,oo]]
abs_rv=Transform(self,gX)
return(abs_rv)
def __add__(self,other):
"""
Procedure Name: __add__
Purpose: If two random variables are passed to the + operator,
the convolution of those random variables is returned.
If a constant is added to the random variable, the
random variable is shifted by that constant
Arguments: 1. self: the random variable
2. other: a constant or random variable
Output: 1. A new random variable
"""
# If the random variable is added to another random variable,
# return the convolution of the two random variables
        if 'RV' in other.__class__.__name__:
            try:
                return Convolution(self,other)
            except:
                # If the convolution fails in this order, try the
                # reverse order before raising an error
                try:
                    return Convolution(other,self)
                except:
                    raise RVError('Could not compute the convolution')
# If the random variable is added to a constant, shift
# the random variable
if type(other) in [float,int]:
gX=[[x+other],[-oo,oo]]
return Transform(self,gX)
def __radd__(self,other):
"""
Procedure Name: __radd__
Purpose: If two random variables are passed to the + operator,
the convolution of those random variables is returned.
If a constant is added to the random variable, the
random variable is shifted by that constant.
__radd__ implements the reflection of __add__
Arguments: 1. self: the random variable
2. other: a constant or random variable
Output: 1. A new random variable
"""
return self.__add__(other)
def __sub__(self,other):
"""
Procedure Name: __sub__
Purpose: If two random variables are passed to the - operator,
the difference of those random variables is returned.
                 If a constant is subtracted from the random variable, the
random variable is shifted by that constant
Arguments: 1. self: the random variable
2. other: a constant or random variable
Output: 1. A new random variable
"""
# If the random variable is subtracted by another random variable,
# return the difference of the two random variables
if 'RV' in other.__class__.__name__:
gX=[[-x],[-oo,oo]]
RVar=Transform(other,gX)
return Convolution(self,RVar)
# If the random variable is subtracted by a constant, shift
# the random variable
if type(other) in [float,int]:
gX=[[x-other],[-oo,oo]]
return Transform(self,gX)
def __rsub__(self,other):
"""
Procedure Name: __rsub__
Purpose: If two random variables are passed to the - operator,
the difference of those random variables is returned.
                 If a constant is subtracted from the random variable, the
random variable is shifted by that constant
Arguments: 1. self: the random variable
2. other: a constant or random variable
Output: 1. A new random variable
"""
# Perform an negative transformation of the random variable
neg_self=-self
# Add the two components
return neg_self.__add__(other)
def __mul__(self,other):
"""
Procedure Name: __mul__
Purpose: If two random variables are passed to the * operator,
the product of those random variables is returned.
If a constant is multiplied by the random variable, the
random variable is scaled by that constant
Arguments: 1. self: the random variable
2. other: a constant or random variable
Output: 1. A new random variable
"""
# If the random variable is multiplied by another random variable,
# return the product of the two random variables
        if 'RV' in other.__class__.__name__:
            try:
                return Product(self,other)
            except:
                # If the product fails in this order, try the
                # reverse order before raising an error
                try:
                    return Product(other,self)
                except:
                    raise RVError('Could not compute the product')
# If the random variable is multiplied by a constant, scale
# the random variable
if type(other) in [float,int]:
gX=[[x*other],[-oo,oo]]
return Transform(self,gX)
def __rmul__(self,other):
"""
Procedure Name: __rmul__
Purpose: If two random variables are passed to the * operator,
the product of those random variables is returned.
If a constant is multiplied by the random variable, the
random variable is scaled by that constant
Arguments: 1. self: the random variable
2. other: a constant or random variable
Output: 1. A new random variable
"""
return self.__mul__(other)
def __truediv__(self,other):
"""
Procedure Name: __truediv__
Purpose: If two random variables are passed to the / operator,
the quotient of those random variables is returned.
If a constant is multiplied by the random variable, the
random variable is scaled by the inverse of that constant
Arguments: 1. self: the random variable
2. other: a constant or random variable
Output: 1. A new random variable
"""
# If the random variable is divided by another random variable,
# return the quotient of the two random variables
if 'RV' in other.__class__.__name__:
gX=[[1/x,1/x],[-oo,0,oo]]
RVar=Transform(other,gX)
return Product(self,RVar)
# If the random variable is divided by a constant, scale
        # the random variable by the inverse of the constant
if type(other) in [float,int]:
gX=[[x/other],[-oo,oo]]
return Transform(self,gX)
def __rtruediv__(self,other):
"""
Procedure Name: __rtruediv__
Purpose: If two random variables are passed to the / operator,
the quotient of those random variables is returned.
If a constant is multiplied by the random variable, the
random variable is scaled by the inverse of that constant
Arguments: 1. self: the random variable
2. other: a constant or random variable
Output: 1. A new random variable
"""
## Invert the random variable
gX=[[1/x,1/x],[-oo,0,oo]]
invert=Transform(self,gX)
## Call the multiplication function
div_rv=invert.__mul__(other)
return div_rv
def __pow__(self,n):
"""
Procedure Name: __pow__
Purpose: If the '**' operator is used on a random variable, the
IID product of the random variable is returned
Arguments: 1. self: the random variable
2. n: the number of iid random variables
Output: 1. The distribution of n iid random variables
"""
# Raise an error if a non-integer value is passed to n
if type(n)!=int:
error_string='a random variable can only be raised to an'
error_string+=' integer value'
raise RVError(error_string)
pow_rv=Pow(self,n)
return pow_rv
def __eq__(self,other):
"""
Procedure Name: __eq__
Purpose: Checks for equality of the two random variables by using
the following algorithm:
1. Test if the support of both random variables
are equal
2. Test to see if each section of the random variable
simplifies to zero when subtracted from the
corresponding segment of the second random
variables
Arguments: 1. self: the random variable
2. other: a second random variable
Output: 1. True if the the random variables are equal, False
otherwise
"""
# If the other is not a random variable, return an error
if 'RV' not in other.__class__.__name__:
error_string='a random variable can only be checked for'
error_string+=' equality with another random variable'
raise RVError(error_string)
# Check to see if the supports of the random variables are
# equal
if not self.support==other.support:
return False
# Subtract each each segment from self from the corresponding
# segment from other, check to see if the difference
# simplifies to zero
for i in range(len(self.func)):
difference=self.func[i]-other.func[i]
difference=simplify(difference)
difference=expand(difference)
if not difference==0:
return False
# If all of the segments simplify to zero, return True
return True
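    # Example (illustrative sketch, not part of the original documentation):
    # the overloaded operators above let random variables be combined like
    # numbers. X and Y below are made-up exponential random variables built
    # with the RV constructor; Convolution, Product, Transform and Pow are
    # the procedures referenced in the methods above.
    #
    #   >>> X = RV([2*exp(-2*x)], [0, oo])
    #   >>> Y = RV([exp(-x)], [0, oo])
    #   >>> Z = X + Y          # convolution (distribution of the sum)
    #   >>> W = 3*X            # scale transformation
    #   >>> V = X**2           # distribution of the product of 2 iid copies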
"""
Utility Methods
Procedures:
1. add_assumptions(self,option)
2. add_to_cache(self,object_name,object)
3. display(self)
4. drop_assumptions(self)
5. init_cache(self)
6. latex(self)
7. save(self,filename)
8. simplify(self,assumption)
9. verifyPDF(self)
10. variate(self,n)
"""
def add_assumptions(self, option):
"""
        Procedure Name: add_assumptions
Purpose: Adds assumptions on the random variable to support operations
on multiple random variables
        Arguments: 1. self: the random variable
2. option: the type of assumption to add
Output: 1. Modified function with assumptions added
"""
if option not in ['positive','negative','nonpositive','nonnegative']:
err_str = 'The only available options are positive, negative,'
err_str += ' nonpositive and nonnegative'
raise RVError(err_str)
if option == 'positive':
x = Symbol('x', positive = True)
elif option == 'negative':
x = Symbol('x', negative = True)
elif option == 'nonpositive':
x = Symbol('x', nonpositive = True)
elif option == 'nonnegative':
x = Symbol('x', nonnegative = True)
for i,function in enumerate(self.func):
function = function.subs(Symbol('x'), x)
self.func[i] = function
def add_to_cache(self,object_name,obj):
"""
Procedure Name: add_to_cache
Purpose: Stores properties of the random variable (i.e. mean, variance,
cdf, sf) in memory. The next time a function is called to
compute that property, APPLPy will retrieve the object
from memory.
Arguments: 1. self: the random variable
2. object_name: the key for the object in the cache
dictionary
3. obj: the object to be stored in memory.
Output: 1. No output. The self.cache property of the random
variable is modified to include the specified
object.
"""
# If a cache for the random variable does not exist, initialize it
if self.cache==None:
self.init_cache()
# Add an object to the cache dictionary
self.cache[object_name]=obj
def display(self,opt='repr'):
"""
Procedure Name: display
Purpose: Displays the random variable in an interactive environment
        Arguments: 1. self: the random variable
Output: 1. A print statement for each piece of the distribution
indicating the function and the relevant support
"""
if self.ftype[0] in ['continuous','Discrete']:
print ('%s %s'%(self.ftype[0],self.ftype[1]))
for i in range(len(self.func)):
print('for %s <= x <= %s'%(self.support[i],
self.support[i+1]))
print('---------------------------')
pprint(self.func[i])
print('---------------------------')
if i<len(self.func)-1:
print(' ');print(' ')
        if self.ftype[0]=='discrete':
            print('%s %s where {x->f(x)}:'%(self.ftype[0],
                                            self.ftype[1]))
            # Build the support -> probability pairs on a single line
            pairs=''
            for i in range(len(self.support)):
                if i!=(len(self.support)-1):
                    pairs+='{%s -> %s}, '%(self.support[i],
                                           self.func[i])
                else:
                    pairs+='{%s -> %s}'%(self.support[i],
                                         self.func[i])
            print(pairs)
def drop_assumptions(self):
"""
Procedure Name: drop_assumptions
Purpose: Drops assumptions on the random variable to support operations
on multiple random variables
        Arguments: 1. self: the random variable
Output: 1. Modified function with assumptions dropped
"""
x = Symbol('x')
for i,function in enumerate(self.func):
function = function.subs(Symbol('x', negative = True), x)
function = function.subs(Symbol('x', nonnegative = True), x)
function = function.subs(Symbol('x', nonpositive = True), x)
function = function.subs(Symbol('x', positive = True), x)
self.func[i] = function
def init_cache(self):
"""
Procedure Name: init_cache
Purpose: Initializes the cache for the random variable
Arguments: 1. self: the random variable
Output: 1. The cache attribute for the random variable
is initialized
"""
self.cache={}
def latex(self):
"""
Procedure Name: latex
Purpose: Outputs the latex code for the random variable
        Arguments: 1. self: the random variable
Output: 1. The latex code for the random variable
"""
if self.ftype[0] not in ['continuous','Discrete']:
error_string='latex is only designed to work for continuous'
error_string+=' distributions and discrete distributions that '
error_string+='are represented in functional form'
raise RVError(error_string)
# Generate the pieces of the piecewise function
piece_list=[]
for i in range(len(self.func)):
f=self.func[i]
sup='x>=%s'%(self.support[i])
tup=(f,eval(sup))
piece_list.append(tup)
piece_list.append((0,True))
piece_input='Piecewise('+str(piece_list)+')'
piece2=piece_input.replace(piece_input[10],'')
n=len(piece2)-2
piece3=piece2.replace(piece2[n],'')
# Create symbols for use in the piecewise
# function display
theta=Symbol('theta');kappa=Symbol('kappa');
a=Symbol('a');b=Symbol('b');c=Symbol('c');
p=Symbol('p');N=Symbol('N');alpha=Symbol('alpha')
beta=Symbol('beta');mu=Symbol('mu');sigma=Symbol('sigma')
p=eval(piece3)
return latex(p)
def save(self, filename = None):
"""
Procedure Name: save
Purpose: Saves a random variable to disk in binary format
Arguments: 1. self: the random variable
2. filename: the name of the file that will
store the random variable. If none is
specified, the most recently used file
name is used
Output: 1. The random variable is stored to disk
"""
if filename == None:
if self.filename == None:
                err_string = 'Please specify a file name, this random '
                err_string += 'variable has never been saved before'
raise RVError(err_string)
else:
filename = self.filename
else:
self.filename = filename
        fileObject = open(filename, 'wb')
        pickle.dump(self, fileObject)
        fileObject.close()
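    # Example (illustrative sketch, not part of the original documentation):
    # save pickles the random variable to disk; pickle.load can restore it
    # later. The file name below is illustrative only.
    #
    #   >>> X = RV([2*exp(-2*x)], [0, oo])
    #   >>> X.save('expo_rv.pkl')
    #   >>> with open('expo_rv.pkl', 'rb') as f:
    #   ...     X2 = pickle.load(f)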
def simplify(self, assumption=None):
"""
Procedure Name: simplify
Purpose: Uses assumptions to help simplify the random variable
Arguments: 1. self: the random variable.
Output: 1. A list of assumptions for each segment in the random
variable
"""
for i, segment in enumerate(self.func):
if self.support[i] < 0 and self.support[i+1] <= 0:
x2 = Symbol('x2',negative=True)
elif self.support[i+1] > 0 and self.support[i] >= 0:
x2 = Symbol('x2',positive=True)
else:
x2 = Symbol('x2')
new_func = segment.subs(x,x2)
new_func = simplify(new_func)
self.func[i] = new_func.subs(x2,x)
new_func = []
new_support = []
for i in range(len(self.func)):
if i == 0:
new_func.append(self.func[i])
new_support.append(self.support[i])
else:
if self.func[i] != self.func[i-1]:
new_func.append(self.func[i])
new_support.append(self.support[i])
new_support.append(self.support[-1])
self.func = new_func
self.support = new_support
self.display()
def verifyPDF(self):
"""
Procedure Name: verifyPDF
Purpose: Verifies whether or not the random variable is valid. It first
checks to make sure the pdf of the random variable
integrates to one. It then checks to make sure the random
variable is strictly positive.
Arguments: 1. self: the random variable.
Output: 1. A print statement indicating the area under the pdf
and a print statement indicating whether or not the
random variable is valid.
"""
# If the random variable is continuous, verify the PDF
if self.ftype[0]=='continuous':
# Check to ensure that the distribution is fully
# specified
for piece in self.func:
func_symbols=piece.atoms(Symbol)
if len(func_symbols)>1:
err_string='distribution must be fully'
err_string+=' specified'
raise RVError(err_string)
# Convert the random variable to PDF form
X_dummy=PDF(self)
# Check to ensure that the area under the PDF is 1
            print('Now checking for area...')
area=0
for i in range(len(X_dummy.func)):
val=integrate(X_dummy.func[i],(x,X_dummy.support[i],
X_dummy.support[i+1]))
area+=val
            print('The area under f(x) is: %s'%(area))
            # Check absolute value
            print('Now checking for absolute value...')
#
# The following code should work in future versions of SymPy
# Currently, Sympy is having difficulty consistently integrating
# the absolute value of a function symbolically
#
#abs_area=0
#for i in range(len(X_dummy.func)):
#val=integrate(Abs(X_dummy.func[i],(x,X_dummy.support[i],
# X_dummy.support[i+1]))
#abs_area+=val
abs_flag=True
val_list=[]
quant_list=[.1,.2,.3,.4,.5,.6,.7,.8,.9]
for i in range(len(quant_list)):
val=self.variate(s=quant_list[i])[0]
val_list.append(val)
for i in range(len(val_list)):
if val_list[i]<0:
abs_flag=False
            print('The pdf of the random variable:')
            print('%s'%(X_dummy.func))
            print('continuous pdf with support %s'%(X_dummy.support))
            if area>.9999 and area<1.00001 and abs_flag==True:
                print('is valid')
                return True
            else:
                print('is not valid')
return False
# If the random variable is in a discrete functional form,
# verify the PDF
if self.ftype[0]=='Discrete':
# Convert the random variable to PDF form
X_dummy=PDF(self)
# Check to ensure that the area under the PDF is 1
            print('Now checking for area...')
area=0
for i in range(len(X_dummy.func)):
val=summation(X_dummy.func[i],(x,X_dummy.support[i],
X_dummy.support[i+1]))
area+=val
            print('The area under f(x) is: %s'%(area))
            # Check absolute value
            print('Now checking for absolute value...')
abs_flag=True
val_list=[]
quant_list=[.1,.2,.3,.4,.5,.6,.7,.8,.9]
for i in range(len(quant_list)):
val=self.variate(s=quant_list[i])[0]
val_list.append(val)
for i in range(len(val_list)):
if val_list[i]<0:
abs_flag=False
            print('The pdf of the random variable:')
            print('%s'%(X_dummy.func))
            print('discrete pdf with support %s'%(X_dummy.support))
            if area>.9999 and area<1.00001 and abs_flag==True:
                print('is valid')
                return True
            else:
                print('is not valid')
return False
# If the random variable is discrete, verify the PDF
if self.ftype[0]=='discrete':
# Convert the random variable to PDF form
X_dummy=PDF(self)
# Check to ensure that the area under the PDF is 1
            print('Now checking for area...')
            area=sum(X_dummy.func)
            #for i in range(len(self.support)):
            #    area+=self.func[i]
            print('The area under f(x) is: %s'%(area))
            # Check for absolute value
            print('Now checking for absolute value...')
abs_flag=True
for i in range(len(self.func)):
if self.func[i]<0:
abs_flag=False
            print('The pdf of the random variable')
            if area>.9999 and area<1.0001 and abs_flag==True:
                print('is valid')
            else:
                print('is not valid')
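    # Example (illustrative sketch, not part of the original documentation):
    # verifyPDF on a fully specified exponential pdf prints the area under
    # f(x) (which is 1) and reports that the distribution is valid.
    #
    #   >>> X = RV([2*exp(-2*x)], [0, oo])
    #   >>> X.verifyPDF()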
def variate(self,n=1,s=None,sensitivity=None,method='newton-raphson'):
"""
Procedure Name: variate
Purpose: Generates a list of n random variates from the random variable
using the Newton-Raphson Method
Arguments: 1. self: the random variable
2. n: the number of variates (default is n=1)
3. s: the percentile of the variate (default is random)
                   4. method: specifies the method for variate generation
valid methods are:
1. 'newton-raphson'
2. 'inverse'
                   5. sensitivity: value indicating how close two iterations
                        must be for the variate generator to reach
                        convergence (the default is 0.1*|mean|)
"""
# Check to see if the user specified a valid method
method_list=['newton-raphson','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
# Find the cdf and pdf functions (to avoid integrating for
        # each variate)
cdf=CDF(self)
pdf=PDF(self)
mean=Mean(self)
        if sensitivity==None:
            # If sensitivity is not specified, set the convergence
            # tolerance to 0.1*|mean| for random variates
            if s==None:
                sensitivity=abs(0.1*mean)
            # The same default tolerance is used when a percentile
            # is specified
            else:
                sensitivity=abs(0.1*mean)
# Create a list of variates
varlist=[]
for i in range(n):
guess_last = oo
guess=mean
if s==None:
val=random()
else:
val=s
while abs(guess_last - guess) > sensitivity:
guess_last = guess
try:
if len(self.func)==1:
guess=(guess-
((cdf.func[0].subs(x,guess)-val)/
pdf.func[0].subs(x,guess)))
guess=guess.evalf()
else:
guess=(guess-((CDF(cdf,guess)-val)/
PDF(pdf,guess))).evalf()
except:
if guess>self.support[len(self.support)-1]:
cfunc=cdf.func[len(self.func)-1].subs(x,guess)
pfunc=pdf.func[len(self.func)-1].subs(x,guess)
guess=(guess-((cfunc-val)/pfunc)).evalf()
if guess<self.support[0]:
cfunc=cdf.func[0].subs(x,guess)
pfunc=pdf.func[0].subs(x,guess)
guess=(guess-((cfunc-val)/pfunc)).evalf()
#print guess
varlist.append(guess)
varlist.sort()
return varlist
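# Example (illustrative sketch, not part of the original documentation):
# drawing variates from an exponential random variable with the default
# Newton-Raphson method and with the inverse-CDF method.
#
#   >>> X = RV([2*exp(-2*x)], [0, oo])
#   >>> X.variate(n=3)                   # three pseudo-random variates
#   >>> X.variate(s=0.5)                 # a list containing the median
#   >>> X.variate(n=2, method='inverse') # uses the IDF instead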
"""
Conversion Procedures:
1. CDF(RVar,value)
2. CHF(RVar,value)
3. HF(RVar,value)
4. IDF(RVar,value)
5. PDF(RVar,value)
6. SF(RVar,value)
7. BootstrapRV(varlist)
8. Convert(RVar,inc)
"""
def check_value(value,sup):
# Not intended for use by end user
"""
Procedure Name: check_value
Purpose: Check to see if a value passed to CDF,CHF,HF,PDF or
SF is in the support of the random variable
Arguments: 1. value: The value passed to RV procedure
2. sup: The support of the RV in the procedure
Output: 1. True if the value given is within the support
2. False otherwise
"""
if value==x:
return True
else:
max_idx=len(sup)-1
if float(value)<float(sup[0]) or float(value)>float(sup[max_idx]):
return False
else:
return True
def CDF(RVar,value=x,cache=False):
"""
Procedure Name: CDF
Purpose: Compute the cdf of a random variable
Arguments: 1. RVar: A random variable
2. value: An integer or floating point number
3. cache: A binary variable. If True, the result will
be stored in memory for later use. (default is False)
Output: 1. CDF of a random variable (if value not specified)
2. Value of the CDF at a given point
(if value is specified)
"""
# Check to make sure the value given is within the random
# variable's support
if value.__class__.__name__!='Symbol':
if value > RVar.support[-1]:
return 1
if value < RVar.support[0]:
return 0
# If the CDF of the random variable is already cached in memory,
    # retrieve the value of the CDF and return it.
if RVar.cache != None and 'cdf' in RVar.cache:
if value==x:
return RVar.cache['cdf']
else:
return CDF(RVar.cache['cdf'],value)
    # If the distribution is continuous, find and return the cdf
    # of the random variable
if RVar.ftype[0]=='continuous':
# Short-cut for Weibull, jump straight to the closed form CDF
if 'Weibull' in RVar.__class__.__name__:
if value == x:
Fx = RV(RVar.cdf,[0,oo],['continuous','cdf'])
return Fx
else:
return simplify(RVar.cdf.subs(x,value))
# If the random variable is already a cdf, nothing needs to
# be done
if RVar.ftype[1]=='cdf':
if value==x:
return RVar
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i] and value<=RVar.support[i+1]:
cdfvalue=RVar.func[i].subs(x,value)
return simplify(cdfvalue)
# If the random variable is a sf, find and return the cdf of the
# random variable
        if RVar.ftype[1]=='sf':
X_dummy=SF(RVar)
# Compute the sf for each segment
cdflist=[]
for i in range(len(X_dummy.func)):
cdflist.append(1-X_dummy.func[i])
# If no value is specified, return the sf function
if value==x:
                cdffunc=RV(cdflist,X_dummy.support,['continuous','cdf'])
if cache==True:
RVar.add_to_cache('cdf',cdffunc)
return cdffunc
# If not, return the value of the cdf at the specified value
else:
for i in range(len(X_dummy.support)):
if value>=X_dummy.support[i] and value<=X_dummy.support[i+1]:
cdfvalue=cdflist[i].subs(x,value)
return simplify(cdfvalue)
# If the random variable is not a cdf or sf, compute the pdf of
# the random variable, and then compute the cdf by integrating
# over each segment of the random variable
else:
X_dummy=PDF(RVar)
            # Substitute the dummy variable 't' into the dummy rv
funclist=[]
for i in range(len(X_dummy.func)):
                # Plain Python numbers do not support .subs; sympy
                # expressions (including sympy numbers) do
                if isinstance(X_dummy.func[i],(int,float)):
                    newfunc=X_dummy.func[i]
                else:
                    newfunc=X_dummy.func[i].subs(x,t)
funclist.append(newfunc)
# Integrate to find the cdf
cdflist=[]
for i in range(len(funclist)):
cdffunc=integrate(funclist[i],(t,X_dummy.support[i],x))
# Adjust the constant of integration
if i!=0:
const=(cdflist[i-1].subs(x,X_dummy.support[i])-
cdffunc.subs(x,X_dummy.support[i]))
cdffunc=cdffunc+const
if i==0:
const=0-cdffunc.subs(x,X_dummy.support[i])
cdffunc=cdffunc+const
cdflist.append(simplify(cdffunc))
# If no value is specified, return the cdf
if value==x:
cdffunc=RV(cdflist,X_dummy.support,['continuous','cdf'])
if cache==True:
RVar.add_to_cache('cdf',cdffunc)
return cdffunc
# If a value is specified, return the value of the cdf
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i] and value<=RVar.support[i+1]:
cdfvalue=cdflist[i].subs(x,value)
return simplify(cdfvalue)
    # If the distribution is in discrete functional form, find and return
    # the cdf of the random variable
if RVar.ftype[0]=='Discrete':
# If the support is finite, then convert to expanded form and compute
# the CDF
if oo not in RVar.support:
if -oo not in RVar.support:
RVar2=Convert(RVar)
return CDF(RVar2,value)
# If the random variable is already a cdf, nothing needs to
# be done
if RVar.ftype[1]=='cdf':
if value==x:
return RVar
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i] and value<=RVar.support[i+1]:
cdfvalue=RVar.func[i].subs(x,value)
return simplify(cdfvalue)
# If the random variable is a sf, find and return the cdf of the
# random variable
        if RVar.ftype[1]=='sf':
X_dummy=SF(RVar)
# Compute the sf for each segment
cdflist=[]
for i in range(len(X_dummy.func)):
cdflist.append(1-X_dummy.func[i])
# If no value is specified, return the sf function
if value==x:
cdffunc=RV(cdflist,X_dummy.support,['Discrete','cdf'])
if cache==True:
RVar.add_to_cache('cdf',cdffunc)
return cdffunc
# If not, return the value of the cdf at the specified value
else:
for i in range(len(X_dummy.support)):
if value>=X_dummy.support[i] and value<=X_dummy.support[i+1]:
cdfvalue=cdflist[i].subs(x,value)
return simplify(cdfvalue)
# If the random variable is not a cdf or sf, compute the pdf of
# the random variable, and then compute the cdf by summing
# over each segment of the random variable
else:
X_dummy=PDF(RVar)
            # Substitute the dummy variable 't' into the dummy rv
funclist=[]
for i in range(len(X_dummy.func)):
newfunc=X_dummy.func[i].subs(x,t)
funclist.append(newfunc)
# Sum to find the cdf
cdflist=[]
for i in range(len(funclist)):
cdffunc=Sum(funclist[i],(t,X_dummy.support[i],x)).doit()
# Adjust the constant of integration
if i!=0:
const=(cdflist[i-1].subs(x,X_dummy.support[i])-
cdffunc.subs(x,X_dummy.support[i]))
#cdffunc=cdffunc+const
if i==0:
const=0-cdffunc.subs(x,X_dummy.support[i])
#cdffunc=cdffunc+const
cdflist.append(simplify(cdffunc))
# If no value is specified, return the cdf
if value==x:
cdffunc=RV(cdflist,X_dummy.support,['Discrete','cdf'])
if cache==True:
RVar.add_to_cache('cdf',cdffunc)
return cdffunc
# If a value is specified, return the value of the cdf
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i] and value<=RVar.support[i+1]:
cdfvalue=cdflist[i].subs(x,value)
return simplify(cdfvalue)
# If the distribution is discrete, find and return the cdf of
# the random variable
if RVar.ftype[0]=='discrete':
# If the distribution is already a cdf, nothing needs to
# be done
if RVar.ftype[1]=='cdf':
if value==x:
return RVar
if value!=x:
for i in range(len(RVar)):
if RVar.support[i]==value:
return RVar.func[i]
if RVar.support[i]<value:
if RVar.support[i+1]>value:
return RVar.func[i]
# If the distribution is a sf, find the cdf by reversing the
# function list
if RVar.ftype[1] in ['sf','chf','hf']:
X_dummy=SF(RVar)
newfunc=[]
for i in reversed(range(len(X_dummy.func))):
newfunc.append(X_dummy.func[i])
Xsf=RV(newfunc,X_dummy.support,['discrete','cdf'])
if value==x:
if cache==True:
RVar.add_to_cache('cdf',Xsf)
return Xsf
if value!=x:
X_dummy=CDF(X_dummy)
for i in range(len(X_dummy.support)):
if X_dummy.support[i]==value:
return X_dummy.func[i]
if X_dummy.support[i]<value:
if X_dummy.support[i+1]>value:
return X_dummy.func[i]
# If the distribution is not a cdf or sf, find the pdf and
# then compute the cdf by summation
else:
X_dummy=PDF(RVar)
cdffunc=[]
area=0
for i in range(len(X_dummy.support)):
area+=X_dummy.func[i]
cdffunc.append(area)
if value==x:
cdffunc=RV(cdffunc,X_dummy.support,['discrete','cdf'])
if cache==True:
RVar.add_to_cache('cdf',cdffunc)
return cdffunc
if value!=x:
X_dummy=CDF(X_dummy)
for i in range(len(X_dummy.support)):
if X_dummy.support[i]==value:
return X_dummy.func[i]
if X_dummy.support[i]<value:
if X_dummy.support[i+1]>value:
return X_dummy.func[i]
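# Example (illustrative sketch, not part of the original documentation):
# CDF converts a pdf-form random variable to cdf form, or evaluates the cdf
# when a value is supplied.
#
#   >>> X = RV([2*exp(-2*x)], [0, oo])
#   >>> CDF(X)        # cdf as a new RV: 1 - exp(-2*x) on [0, oo)
#   >>> CDF(X, 1)     # F(1) = 1 - exp(-2)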
def CHF(RVar,value=x,cache=False):
"""
Procedure Name: CHF
Purpose: Compute the chf of a random variable
Arguments: 1. RVar: A random variable
2. value: An integer or floating point number
(optional)
Output: 1. CHF of a random variable (if value not specified)
2. Value of the CHF at a given point
(if value is specified)
"""
# Check to make sure the value given is within the random
# variable's support
if value.__class__.__name__!='Symbol':
if value>RVar.support[-1] or value<RVar.support[0]:
string='Value is not within the support of the random variable'
raise RVError(string)
# If the CHF of the random variable is already cached in memory,
    # retrieve the value of the CHF and return it.
if RVar.cache != None and 'chf' in RVar.cache:
if value==x:
return RVar.cache['chf']
else:
return CHF(RVar.cache['chf'],value)
# If the distribution is continuous, find and return the chf of
# the random variable
if RVar.ftype[0]=='continuous':
# If the distribution is already a chf, nothing needs to
# be done
if RVar.ftype[1]=='chf':
if value==x:
return RVar
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i]:
if value<=RVar.support[i+1]:
chfvalue=RVar.func[i].subs(x,value)
return simplify(chfvalue)
# Otherwise, find and return the chf
else:
X_dummy=SF(RVar)
# Generate a list of sf functions
sflist=[]
for i in range(len(X_dummy.func)):
sflist.append(X_dummy.func[i])
# Generate chf functions
chffunc=[]
for i in range(len(sflist)):
newfunc=-ln(sflist[i])
chffunc.append(simplify(newfunc))
# If a value is not specified, return the chf of the
# random variable
if value==x:
chffunc=RV(chffunc,X_dummy.support,['continuous','chf'])
if cache==True:
RVar.add_to_cache('chf',chffunc)
return chffunc
if value!=x:
for i in range(len(RVar.support)):
                    if value>=RVar.support[i]:
if value<=RVar.support[i+1]:
chfvalue=chffunc[i].subs(x,value)
return simplify(chfvalue)
# If the distribution is a discrete function, find and return the chf of
# the random variable
if RVar.ftype[0]=='Discrete':
# If the support is finite, then convert to expanded form and compute
# the CHF
if oo not in RVar.support:
if -oo not in RVar.support:
RVar2=Convert(RVar)
return CHF(RVar2,value)
# If the distribution is already a chf, nothing needs to
# be done
if RVar.ftype[1]=='chf':
if value==x:
return RVar
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i]:
if value<=RVar.support[i+1]:
chfvalue=RVar.func[i].subs(x,value)
return simplify(chfvalue)
# Otherwise, find and return the chf
else:
X_dummy=SF(RVar)
# Generate a list of sf functions
sflist=[]
for i in range(len(X_dummy.func)):
sflist.append(X_dummy.func[i])
# Generate chf functions
chffunc=[]
for i in range(len(sflist)):
newfunc=-ln(sflist[i])
chffunc.append(simplify(newfunc))
# If a value is not specified, return the chf of the
# random variable
if value==x:
chfrv=RV(chffunc,X_dummy.support,['Discrete','chf'])
if cache==True:
RVar.add_to_cache('chf',chfrv)
return chfrv
if value!=x:
for i in range(len(RVar.support)):
                    if value>=RVar.support[i]:
if value<=RVar.support[i+1]:
chfvalue=chffunc[i].subs(x,value)
return simplify(chfvalue)
# If the random variable is discrete, find and return the chf
if RVar.ftype[0]=='discrete':
# If the distribution is already a chf, nothing needs to
# be done
if RVar.ftype[1]=='chf':
if value==x:
return RVar
if value!=x:
if value not in RVar.support:
return 0
else:
return RVar.func[RVar.support.index(value)]
# Otherwise, use the survivor function to find the chf
else:
X_sf=SF(RVar)
chffunc=[]
for i in range(len(X_sf.func)):
chffunc.append(-log(X_sf.func[i]))
if value==x:
chfrv=RV(chffunc,X_sf.support,['discrete','chf'])
if cache==True:
RVar.add_to_cache('chf',chfrv)
return chfrv
if value!=x:
if value not in RVar.support:
return 0
else:
return chffunc[RVar.support.index(value)]
def HF(RVar,value=x,cache=False):
"""
Procedure Name: HF
Purpose: Compute the hf of a random variable
Arguments: 1. RVar: A random variable
2. value: An integer or floating point number
(optional)
Output: 1. HF of a random variable (if value not specified)
2. Value of the HF at a given point
(if value is specified)
"""
# Check to make sure the value given is within the random
# variable's support
if value.__class__.__name__!='Symbol':
if value>RVar.support[-1] or value<RVar.support[0]:
string='Value is not within the support of the random variable'
raise RVError(string)
# If the HF of the random variable is already cached in memory,
    # retrieve the value of the HF and return it.
if RVar.cache != None and 'hf' in RVar.cache:
if value==x:
return RVar.cache['hf']
else:
return HF(RVar.cache['hf'],value)
# If the distribution is continuous, find and return the hf of
# the random variable
if RVar.ftype[0]=='continuous':
# If the distribution is already a hf, nothing needs to be
# done
if RVar.ftype[1]=='hf':
if value==x:
return RVar
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i]:
if value<=RVar.support[i+1]:
hfvalue=RVar.func[i].subs(x,value)
return simplify(hfvalue)
# If the distribution is in chf form, use differentiation
# to find the hf
if RVar.ftype[1]=='chf':
X_dummy=CHF(RVar)
# Generate a list of hf functions
hflist=[]
for i in range(len(X_dummy.func)):
newfunc=diff(X_dummy.func[i],x)
hflist.append(newfunc)
if value==x:
hfrv=RV(hflist,X_dummy.support,['continuous','hf'])
if cache==True:
RVar.add_to_cache('hf',hfrv)
return hfrv
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i]:
if value<=RVar.support[i+1]:
hfvalue=hflist[i].subs(x,value)
return simplify(hfvalue)
# In all other cases, use the pdf and the sf to find the hf
else:
X_pdf=PDF(RVar).func
X_sf=SF(RVar).func
# Create a list of hf functions
hflist=[]
for i in range(len(RVar.func)):
hfunc=(X_pdf[i])/(X_sf[i])
hflist.append(simplify(hfunc))
if value==x:
hfrv=RV(hflist,RVar.support,['continuous','hf'])
if cache==True:
RVar.add_to_cache('hf',hfrv)
return hfrv
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i]:
if value<=RVar.support[i+1]:
hfvalue=hflist[i].subs(x,value)
return simplify(hfvalue)
# If the distribution is a discrete function, find and return the hf of
# the random variable
if RVar.ftype[0]=='Discrete':
# If the support is finite, then convert to expanded form and compute
# the HF
if oo not in RVar.support:
if -oo not in RVar.support:
RVar2=Convert(RVar)
return HF(RVar2,value)
# If the distribution is already a hf, nothing needs to be
# done
if RVar.ftype[1]=='hf':
if value==x:
return RVar
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i]:
if value<=RVar.support[i+1]:
hfvalue=RVar.func[i].subs(x,value)
return simplify(hfvalue)
# In all other cases, use the pdf and the sf to find the hf
else:
X_pdf=PDF(RVar).func
X_sf=SF(RVar).func
# Create a list of hf functions
hflist=[]
for i in range(len(RVar.func)):
hfunc=(X_pdf[i])/(X_sf[i])
hflist.append(simplify(hfunc))
if value==x:
hfrv=RV(hflist,RVar.support,['Discrete','hf'])
if cache==True:
RVar.add_to_cache('hf',hfrv)
return hfrv
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i]:
if value<=RVar.support[i+1]:
hfvalue=hflist[i].subs(x,value)
return simplify(hfvalue)
# If the random variable is discrete, find and return the hf
if RVar.ftype[0]=='discrete':
# If the distribution is already a hf, nothing needs
# to be done
if RVar.ftype[1]=='hf':
if value==x:
return RVar
if value!=x:
if value not in RVar.support:
return 0
else:
return RVar.func[RVar.support.index(value)]
# Otherwise, use the pdf and sf to find the hf
else:
X_pdf=PDF(RVar)
X_sf=SF(RVar)
hffunc=[]
for i in range(len(X_pdf.func)):
hffunc.append(X_pdf.func[i]/X_sf.func[i])
if value==x:
hfrv=RV(hffunc,X_pdf.support,['discrete','hf'])
if cache==True:
RVar.add_to_cache('hf',hfrv)
return hfrv
if value!=x:
if value not in X_pdf.support:
return 0
else:
return hffunc[X_pdf.support.index(value)]
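# Example (illustrative sketch, not part of the original documentation):
# the hazard function of an exponential distribution is constant, which HF
# recovers as pdf/sf.
#
#   >>> X = RV([2*exp(-2*x)], [0, oo])
#   >>> HF(X)         # hazard function identically 2 on [0, oo)
#   >>> HF(X, 1)      # evaluates to 2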
def IDF(RVar,value=x,cache=False):
"""
Procedure Name: IDF
Purpose: Compute the idf of a random variable
Arguments: 1. RVar: A random variable
2. value: An integer or floating point number
(optional)
Output: 1. IDF of a random variable (if value not specified)
2. Value of the IDF at a given point
(if value is specified)
"""
# Check to make sure the value given is within the random
# variable's support
if value.__class__.__name__!='Symbol':
if value>1 or value<0:
string='Value is not within the support of the random variable'
raise RVError(string)
# If the IDF of the random variable is already cached in memory,
    # retrieve the value of the IDF and return it.
if RVar.cache != None and 'idf' in RVar.cache:
if value==x:
return RVar.cache['idf']
else:
return IDF(RVar.cache['idf'],value)
# If the distribution is continuous, find and return the idf
# of the random variable
if RVar.ftype[0]=='continuous':
if value==x:
if RVar.ftype[1]=='idf':
return RVar
# Convert the random variable to its CDF form
X_dummy=CDF(RVar)
# Create values used to check for correct inverse
check=[]
for i in range(len(X_dummy.support)-1):
if X_dummy.support[i]==-oo and X_dummy.support[i+1]==oo:
check.append(0)
elif X_dummy.support[i]==-oo and X_dummy.support[i+1]!=oo:
check.append(X_dummy.support[i+1]-1)
elif X_dummy.support[i]!=-oo and X_dummy.support[i+1]==oo:
check.append(X_dummy.support[i]+1)
else:
check.append((X_dummy.support[i]+
X_dummy.support[i+1])/2)
# Use solve to create a list of candidate inverse functions
# Check to see which of the candidate inverse functions is correct
idffunc=[]
for i in range(len(X_dummy.func)):
invlist=solve(X_dummy.func[i]-t,x)
if len(invlist)==1:
idffunc.append(invlist[0])
else:
# The flag is used to determine if two separate inverses
# could represent the inverse of the CDF. If this is the
# case, an exception is raised
flag=False
for j in range(len(invlist)):
subsfunc=X_dummy.func[i]
val=invlist[j].subs(t,subsfunc.subs(x,check[i])).evalf()
if abs(val-check[i])<.00001:
if flag==True:
error_string='Could not find the'
error_string+=' correct inverse'
raise RVError(error_string)
idffunc.append(invlist[j])
flag=True
# Create a list of supports for the IDF
idfsup=[]
for i in range(len(X_dummy.support)):
idfsup.append(CDF(X_dummy,X_dummy.support[i]))
# Replace t with x
idffunc2=[]
for i in range(len(idffunc)):
func=idffunc[i].subs(t,x)
idffunc2.append(simplify(func))
# Return the IDF
idfrv=RV(idffunc2,idfsup,['continuous','idf'])
if cache==True:
RVar.add_to_cache('idf',idfrv)
return idfrv
# If a value is specified, return the value of the IDF at x=value
if value!=x:
X_dummy=IDF(RVar)
for i in range(len(X_dummy.support)):
if value>=X_dummy.support[i] and value<=X_dummy.support[i+1]:
idfvalue=X_dummy.func[i].subs(x,value)
return simplify(idfvalue)
# If the distribution is a discrete function, find and return the idf
# of the random variable
if RVar.ftype[0]=='Discrete':
# If the support is finite, then convert to expanded form and compute
# the IDF
if oo not in RVar.support:
if -oo not in RVar.support:
RVar2=Convert(RVar)
return IDF(RVar2,value)
if value==x:
if RVar.ftype[1]=='idf':
                return RVar
# Convert the random variable to its CDF form
X_dummy=CDF(RVar)
# Create values used to check for correct inverse
check=[]
for i in range(len(X_dummy.support)-1):
if X_dummy.support[i]==-oo and X_dummy.support[i+1]==oo:
check.append(0)
elif X_dummy.support[i]==-oo and X_dummy.support[i+1]!=oo:
check.append(X_dummy.support[i+1]-1)
elif X_dummy.support[i]!=-oo and X_dummy.support[i+1]==oo:
check.append(X_dummy.support[i]+1)
else:
check.append((X_dummy.support[i]+
X_dummy.support[i+1])/2)
# Use solve to create a list of candidate inverse functions
# Check to see which of the candidate inverse functions is correct
idffunc=[]
for i in range(len(X_dummy.func)):
invlist=solve(X_dummy.func[i]-t,x)
if len(invlist)==1:
idffunc.append(invlist[0])
else:
# The flag is used to determine if two separate inverses
# could represent the inverse of the CDF. If this is the
# case, an exception is raised
flag=False
for j in range(len(invlist)):
subsfunc=X_dummy.func[i]
val=invlist[j].subs(t,subsfunc.subs(x,check[i])).evalf()
if abs(val-check[i])<.00001:
if flag==True:
error_string='Could not find the correct'
error_string+=' inverse'
raise RVError(error_string)
idffunc.append(invlist[j])
flag=True
# Create a list of supports for the IDF
idfsup=[]
for i in range(len(X_dummy.support)):
idfsup.append(CDF(X_dummy,X_dummy.support[i]))
# Replace t with x
idffunc2=[]
for i in range(len(idffunc)):
func=idffunc[i].subs(t,x)
idffunc2.append(simplify(func))
# Return the IDF
idfsup[0] = 0
idfrv=RV(idffunc2,idfsup,['Discrete','idf'])
if cache==True:
RVar.add_to_cache('idf',idfrv)
return idfrv
        # If a value is specified, find the value of the idf
if value!=x:
X_dummy=IDF(RVar)
for i in range(len(X_dummy.support)):
if value>=X_dummy.support[i] and value<=X_dummy.support[i+1]:
idfvalue=X_dummy.func[i].subs(x,value)
return simplify(idfvalue)
#varlist=RVar.variate(s=value)
#return varlist[0]
# If the distribution is discrete, find and return the idf of the
# random variable
if RVar.ftype[0]=='discrete':
# If the distribution is already an idf, nothing needs to be done
if RVar.ftype[1]=='idf':
if value==x:
return RVar
if value!=x:
                for i in range(len(RVar.support)):
                    if RVar.support[i]==value:
                        return RVar.func[i]
                    if RVar.support[i]<value:
                        if RVar.support[i+1]>value:
                            return RVar.func[i+1]
# Otherwise, find the cdf, and then invert it
else:
# If the distribution is a chf or hf, convert to an sf first
if RVar.ftype[1]=='chf' or RVar.ftype[1]=='hf':
X_dummy0=SF(RVar)
X_dummy=CDF(X_dummy0)
else:
X_dummy=CDF(RVar)
if value==x:
return RV(X_dummy.support,X_dummy.func,['discrete','idf'])
if value!=x:
X_dummy=RV(X_dummy.support,X_dummy.func,['discrete','idf'])
for i in range(len(X_dummy.support)):
if X_dummy.support[i]==value:
return X_dummy.func[i]
if X_dummy.support[i]<value:
if X_dummy.support[i+1]>value:
return X_dummy.func[i+1]
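# Example (illustrative sketch, not part of the original documentation):
# IDF inverts the cdf, so evaluating it at a probability returns the
# corresponding quantile.
#
#   >>> X = RV([2*exp(-2*x)], [0, oo])
#   >>> IDF(X)        # inverse cdf as a new RV on [0, 1]
#   >>> IDF(X, 0.5)   # the median, log(2)/2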
def PDF(RVar,value=x,cache=False):
"""
Procedure Name: PDF
Purpose: Compute the pdf of a random variable
Arguments: 1. RVar: A random variable
2. value: An integer or floating point number (optional)
Output: 1. PDF of a random variable (if value not specified)
2. Value of the PDF at a given point (if value is specified)
"""
# Check to make sure the value given is within the random
# variable's support
if value.__class__.__name__!='Symbol':
if value>RVar.support[-1] or value<RVar.support[0]:
string='Value is not within the support of the random variable'
raise RVError(string)
# If the PDF of the random variable is already cached in memory,
    # retrieve the value of the PDF and return it.
if RVar.cache != None and 'pdf' in RVar.cache:
if value==x:
return RVar.cache['pdf']
else:
return PDF(RVar.cache['pdf'],value)
# If the distribution is continuous, find and return the pdf of the
# random variable
if RVar.ftype[0]=='continuous':
# If the distribution is already a pdf, nothing needs to be done
if RVar.ftype[1]=='pdf':
if value==x:
return RVar
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i] and value<=RVar.support[i+1]:
pdfvalue=RVar.func[i].subs(x,value)
return simplify(pdfvalue)
# If the distribution is a hf or chf, use integration to find the pdf
if RVar.ftype[1]=='hf' or RVar.ftype[1]=='chf':
X_dummy=HF(RVar)
# Substitute the dummy variable 't' into the hazard function
hfsubslist=[]
for i in range(len(X_dummy.func)):
newfunc=X_dummy.func[i].subs(x,t)
hfsubslist.append(newfunc)
# Integrate the hazard function
intlist=[]
for i in range(len(hfsubslist)):
newfunc=integrate(hfsubslist[i],(t,X_dummy.support[i],x))
# Correct the constant of integration
if i!=0:
const=intlist[i-1].subs(x,X_dummy.support[i])
const=const-newfunc.subs(x,X_dummy.support[i])
newfunc=newfunc+const
if i==0:
const=0-newfunc.subs(x,X_dummy.support[i])
newfunc=newfunc+const
intlist.append(simplify(newfunc))
# Multiply to find the pdf
pdffunc=[]
for i in range(len(intlist)):
newfunc=X_dummy.func[i]*exp(-intlist[i])
pdffunc.append(simplify(newfunc))
if value==x:
pdfrv=RV(pdffunc,RVar.support,['continuous','pdf'])
if cache==True:
RVar.add_to_cache('pdf',pdfrv)
return pdfrv
if value!=x:
for i in range(len(X_dummy.support)):
if value>=X_dummy.support[i]:
if value<=X_dummy.support[i+1]:
pdfvalue=pdffunc[i].subs(x,value)
return simplify(pdfvalue)
# In all other cases, find the pdf by differentiating the cdf
else:
X_dummy=CDF(RVar)
if value==x:
pdflist=[]
for i in range(len(X_dummy.func)):
pdflist.append(diff(X_dummy.func[i],x))
pdfrv=RV(pdflist,RVar.support,['continuous','pdf'])
if cache==True:
RVar.add_to_cache('pdf',pdfrv)
return pdfrv
if value!=x:
                for i in range(len(X_dummy.support)):
                    if value>=X_dummy.support[i]:
                        if value<=X_dummy.support[i+1]:
                            pdffunc=diff(X_dummy.func[i],x)
                            pdfvalue=pdffunc.subs(x,value)
                            return simplify(pdfvalue)
# If the distribution is a discrete function, find and return the pdf
if RVar.ftype[0]=='Discrete':
# If the distribution is already a pdf, nothing needs to be done
if RVar.ftype[1]=='pdf':
if value==x:
return RVar
if value!=x:
for i in range(len(RVar.support)):
if value>=RVar.support[i] and value<=RVar.support[i+1]:
pdfvalue=RVar.func[i].subs(x,value)
return simplify(pdfvalue)
# If the support is finite, then convert to expanded form and compute
# the PDF
if oo not in RVar.support:
if -oo not in RVar.support:
RVar2=Convert(RVar)
return PDF(RVar2,value)
# If the distribution is a hf or chf, use summation to find the pdf
if RVar.ftype[1]=='hf' or RVar.ftype[1]=='chf':
X_dummy=HF(RVar)
# Substitute the dummy variable 't' into the hazard function
hfsubslist=[]
for i in range(len(X_dummy.func)):
newfunc=X_dummy.func[i].subs(x,t)
hfsubslist.append(newfunc)
# Integrate the hazard function
sumlist=[]
for i in range(len(hfsubslist)):
newfunc=summation(hfsubslist[i],(t,X_dummy.support[i],x))
# Correct the constant of integration
if i!=0:
const=sumlist[i-1].subs(x,X_dummy.support[i])
const=const-newfunc.subs(x,X_dummy.support[i])
newfunc=newfunc+const
if i==0:
const=0-newfunc.subs(x,X_dummy.support[i])
newfunc=newfunc+const
                sumlist.append(simplify(newfunc))
# Multiply to find the pdf
pdffunc=[]
            for i in range(len(sumlist)):
newfunc=X_dummy.func[i]*exp(-sumlist[i])
pdffunc.append(simplify(newfunc))
if value==x:
pdfrv=RV(pdffunc,RVar.support,['Discrete','pdf'])
if cache==True:
RVar.add_to_cache('pdf',pdfrv)
return pdfrv
if value!=x:
for i in range(len(X_dummy.support)):
if value>=X_dummy.support[i] and value<=X_dummy.support[i+1]:
pdfvalue=pdffunc[i].subs(x,value)
return simplify(pdfvalue)
# In all other cases, find the pdf by differentiating the cdf
else:
X_dummy=CDF(RVar)
if value==x:
pdflist=[]
# Find the pmf by subtracting CDF(X,x)-CDF(X,x-1)
for i in range(len(X_dummy.func)):
funcX1=X_dummy.func[i]
funcX0=X_dummy.func[i].subs(x,x-1)
pmf=simplify(funcX1-funcX0)
pdflist.append(pmf)
pdfrv=RV(pdflist,RVar.support,['Discrete','pdf'])
if cache==True:
RVar.add_to_cache('pdf',pdfrv)
return pdfrv
if value!=x:
                for i in range(len(X_dummy.support)):
                    if value>=X_dummy.support[i]:
                        if value<=X_dummy.support[i+1]:
                            funcX1=X_dummy.func[i]
                            funcX0=X_dummy.func[i].subs(x,x-1)
                            pmf=simplify(funcX1-funcX0)
                            pdfvalue=pmf.subs(x,value)
                            return simplify(pdfvalue)
# If the distribution is discrete, find and return the pdf of the
# random variable
if RVar.ftype[0]=='discrete':
# If the distribution is already a pdf, nothing needs to be done
if RVar.ftype[1]=='pdf':
if value==x:
return RVar
if value!=x:
if value not in RVar.support:
return 0
else:
return RVar.func[RVar.support.index(value)]
# Otherwise, find the cdf of the random variable, and compute the pdf
# by finding differences
else:
X_dummy=CDF(RVar)
pdffunc=[]
for i in range(len(X_dummy.func)):
if i==0:
pdffunc.append(X_dummy.func[i])
else:
pdffunc.append(X_dummy.func[i]-X_dummy.func[i-1])
if value==x:
pdfrv=RV(pdffunc,X_dummy.support,['discrete','pdf'])
if cache==True:
RVar.add_to_cache('pdf',pdfrv)
return pdfrv
if value!=x:
if value not in X_dummy.support:
return 0
else:
                    return pdffunc[X_dummy.support.index(value)]
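# Illustrative usage sketch: PDF either returns the pdf form of a random
# variable or evaluates it at a point. For a uniform random variable on [0,2]
# built directly with the RV constructor used throughout this module:
#   X = RV([Rational(1,2)], [0, 2], ['continuous', 'pdf'])
#   PDF(X)       # returns X itself, since it is already in pdf form
#   PDF(X, 1)    # substitutes x=1 into the density, giving 1/2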
def SF(RVar,value=x,cache=False):
"""
Procedure Name: SF
Purpose: Compute the SF of a random variable
Arguments: 1. RVar: A random variable
2. value: An integer or floating point number (optional)
Output: 1. SF of a random variable (if value not specified)
2. Value of the SF at a given point (if value is specified)
"""
# Check to make sure the value given is within the random
# variable's support
if value.__class__.__name__!='Symbol':
if value > RVar.support[-1]:
return 0
if value < RVar.support[0]:
return 1
# If the SF of the random variable is already cached in memory,
    # retrieve the value of the SF and return it.
if RVar.cache != None and 'sf' in RVar.cache:
if value==x:
return RVar.cache['sf']
else:
return SF(RVar.cache['sf'],value)
# If the distribution is continuous, find and return the sf of the
# random variable
if RVar.ftype[0]=='continuous':
# If the distribution is already a sf, nothing needs to be done
if RVar.ftype[1]=='sf':
if value==x:
return RVar
else:
return 1-CDF(RVar,value)
#for i in range(len(RVar.support)):
# if value>=RVar.support[i] and value<=RVar.support[i+1]:
# sfvalue=RVar.func[i].subs(x,value)
# return simplify(sfvalue)
# If not, then use subtraction to find the sf
else:
X_dummy=CDF(RVar)
# Compute the sf for each segment
sflist=[]
for i in range(len(X_dummy.func)):
sflist.append(1-X_dummy.func[i])
if value==x:
sfrv=RV(sflist,RVar.support,['continuous','sf'])
if cache==True:
RVar.add_to_cache('sf',sfrv)
return sfrv
if value!=x:
return 1-CDF(RVar,value)
#for i in range(len(X_dummy.support)):
# if value>=X_dummy.support[i]:
# if value<=X_dummy.support[i+1]:
# sfvalue=sflist[i].subs(x,value)
# return simplify(sfvalue)
# If the distribution is discrete, find and return the sf of the
# random variable
if RVar.ftype[0]=='Discrete':
if oo not in RVar.support:
if -oo not in RVar.support:
RVar2=Convert(RVar)
return SF(RVar2,value)
# If the distribution is already a sf, nothing needs to be done
if RVar.ftype[1]=='sf':
if value==x:
return RVar
else:
return 1-CDF(RVar,value)
#for i in range(len(RVar.support)):
# if value>=RVar.support[i] and value<=RVar.support[i+1]:
# sfvalue=RVar.func[i].subs(x,value)
# return simplify(sfvalue)
# If not, then use subtraction to find the sf
else:
X_dummy=CDF(RVar)
# Compute the sf for each segment
sflist=[]
for i in range(len(X_dummy.func)):
sflist.append(1-X_dummy.func[i])
if value==x:
                sfrv=RV(sflist,RVar.support,['Discrete','sf'])
if cache==True:
RVar.add_to_cache('sf',sfrv)
return sfrv
if value!=x:
return 1-CDF(RVar,value)
#for i in range(len(X_dummy.support)):
# if value>=X_dummy.support[i] and
# value<=X_dummy.support[i+1]:
# sfvalue=sflist[i].subs(x,value)
# return simplify(sfvalue)
# If the distribution is a discrete function, find and return the sf of the
# random variable
if RVar.ftype[0]=='Discrete':
# If the distribution is already a sf, nothing needs to be done
if RVar.ftype[1]=='sf':
if value==x:
return RVar
else:
return 1-CDF(RVar,value)
#for i in range(len(RVar.support)):
# if value>=RVar.support[i] and value<=RVar.support[i+1]:
# sfvalue=RVar.func[i].subs(x,value)
# return simplify(sfvalue)
# If not, then use subtraction to find the sf
else:
X_dummy=CDF(RVar)
# Compute the sf for each segment
sflist=[]
for i in range(len(X_dummy.func)):
sflist.append(1-X_dummy.func[i])
if value==x:
sfrv=RV(sflist,RVar.support,['Discrete','sf'])
if cache==True:
RVar.add_to_cache('sf',sfrv)
return sfrv
if value!=x:
return 1-CDF(RVar,value)
#for i in range(len(X_dummy.support)):
# if value>=X_dummy.support[i]:
# if value<=X_dummy.support[i+1]:
# sfvalue=sflist[i].subs(x,value)
# return simplify(sfvalue)
# If the distribution is a discrete function, find and return the
# sf of the random variable
if RVar.ftype[0]=='discrete':
if value!=x:
return 1 - CDF(RVar,value)
# If the distribution is already an sf, nothing needs to be done
if RVar.ftype[1]=='sf':
if value==x:
return RVar
#
#if value not in RVar.support:
# return 0
#else:
# return RVar.func[RVar.support.index(value)]
# If the distribution is a chf use exp(-chf) to find sf
if RVar.ftype[1]=='chf':
X_dummy=CHF(RVar)
sffunc=[]
for i in range(len(X_dummy.func)):
sffunc.append(exp(-(X_dummy.func[i])))
if value==x:
sfrv=RV(sffunc,X_dummy.support,['discrete','sf'])
if cache==True:
RVar.add_to_cache('sf',sfrv)
return sfrv
if value!=x:
if value not in RVar.support:
return 0
else:
return sffunc[RVar.support.index(value)]
# If the distribution is a hf, use bootstrap rv to find sf:
if RVar.ftype[1]=='hf':
X_pdf=BootstrapRV(RVar.support)
X_hf=RVar
sffunc=[]
for i in range(len(RVar.func)):
sffunc.append(X_pdf.func[i]/X_hf.func[i])
if value==x:
sfrv=RV(sffunc,RVar.support,['discrete','sf'])
if cache==True:
RVar.add_to_cache('sf',sfrv)
return sfrv
if value!=x:
if value not in RVar.support:
return 0
else:
return sffunc[RVar.support.index(value)]
# Otherwise, find the cdf of the random variable, and reverse the
# function argument
else:
X_dummy=CDF(RVar)
newfunc=[]
for i in range(len(X_dummy.func)):
if i==0:
newfunc.append(0)
else:
newfunc.append(1-X_dummy.func[i-1])
Xsf=RV(newfunc,X_dummy.support,['discrete','sf'])
if value==x:
if cache==True:
RVar.add_to_cache('sf',Xsf)
return Xsf
if value!=x:
if value not in Xsf.support:
return 0
else:
return Xsf.func[Xsf.support.index(value)]
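# Illustrative usage sketch (same uniform RV on [0,2] as in the PDF example):
#   SF(X)        # returns the survivor function 1 - x/2 in sf form
#   SF(X, 1)     # computed as 1 - CDF(X, 1), giving 1/2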
def BootstrapRV(varlist,symbolic=False):
"""
Procedure Name: Bootstrap RV
Purpose: Generate a discrete random variable from a list of variates
Arguments: 1. varlist: A list of variates
Output: 1. A discrete random variable, where each element in the
given variate list is equally probable
"""
# Sort the list of variables
varlist.sort()
# Find the number of elements in the list of variates
numel=len(varlist)
# Use varlist to generate the function and support for the random variable
# Count number of times element appears in varlist, divide by number
# of elements
funclist=[]
supplist=[]
for i in range(len(varlist)):
if varlist[i] not in supplist:
supplist.append(varlist[i])
funclist.append(Rational(varlist.count(varlist[i]),numel))
# Return the result as a discrete random variable
return RV(funclist,supplist,['discrete','pdf'])
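# Illustrative usage sketch: BootstrapRV builds an equally weighted empirical
# pmf from raw variates, e.g.
#   Xstar = BootstrapRV([1, 2, 2, 5])
#   # Xstar.support -> [1, 2, 5]    Xstar.func -> [1/4, 1/2, 1/4]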
def Convert(RVar,inc=1):
"""
Procedure Name: Convert
Purpose: Convert a discrete random variable from functional to
explicit form
Arguments: 1. RVar: A functional discrete random variable
2. inc: An increment value
Output: 1. A discrete random variable in explicit form
"""
# If the random variable is not in functional form, return
# an error
if RVar.ftype[0]!='Discrete':
raise RVError('The random variable must be Discrete')
# If the rv has infinite support, return an error
    if oo in RVar.support or -oo in RVar.support:
raise RVError('Convert does not work for infinite support')
# Create the support of explicit discrete rv
i=RVar.support[0]
discrete_supp=[]
while i<=RVar.support[1]:
discrete_supp.append(i)
i+=inc
# Create the function values for the explicit rv
discrete_func=[]
for i in range(len(discrete_supp)):
val=RVar.func[0].subs(x,discrete_supp[i])
discrete_func.append(val)
# Return the random variable in discrete form
return RV(discrete_func,discrete_supp,
['discrete',RVar.ftype[1]])
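# Illustrative usage sketch: Convert expands a symbolic discrete RV with finite
# support into explicit form, e.g. for the pmf f(x) = x/6 on {1, 2, 3}:
#   X = RV([x/6], [1, 3], ['Discrete', 'pdf'])
#   Convert(X)   # -> explicit RV with support [1, 2, 3] and func [1/6, 1/3, 1/2]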
"""
Procedures on One Random Variable
Procedures:
1. ConvolutionIID(RVar,n)
2. CoefOfVar(RVar)
3. ExpectedValue(RVar,gX)
4. Entropy(RVar)
5. Kurtosis(RVar)
6. MaximumIID(RVar,n)
7. Mean(RVar)
8. MeanDiscrete(RVar)
9. MGF(RVar)
10. MinimumIID(RVar,n)
11. OrderStat(RVar,n,r)
12. Power(Rvar,n)
13. ProductIID(RVar,n)
14. Skewness(RVar)
15. SqRt(RVar)
16. Transform(RVar,gX)
17. Truncate(RVar,[lw,up])
18. Variance(RVar)
19. VarDiscrete(RVar)
"""
def ConvolutionIID(RVar,n):
"""
Procedure Name: ConvolutionIID
Purpose: Compute the convolution of n iid random variables
Arguments: 1. RVar: A random variable
2. n: an integer
Output: 1. The convolution of n iid random variables
"""
# Check to make sure n is an integer
if type(n)!=int:
raise RVError('The second argument must be an integer')
# Compute the iid convolution
X_dummy=PDF(RVar)
X_final=X_dummy
for i in range(n-1):
X_final+=X_dummy
return PDF(X_final)
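# Note: ConvolutionIID folds the '+' operator over n copies of the random
# variable; the '+' operator on RV instances is assumed here to perform a
# pairwise convolution, as suggested by its use below.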
def CoefOfVar(RVar,cache=False):
"""
Procedure Name: CoefOfVar
Purpose: Compute the coefficient of variation of a random variable
Arguments: 1. RVar: A random variable
Output: 1. The coefficient of variation
"""
# If the input is a list of data, compute the CoefofVar
# for the data set
if type(RVar)==list:
Xstar=BootstrapRV(RVar)
return CoefOfVar(Xstar)
# If the COV of the random variable is already cached in memory,
    # retrieve the value of the COV and return it.
if RVar.cache != None and 'cov' in RVar.cache:
return RVar.cache['cov']
# Compute the coefficient of varation
expect=Mean(RVar)
sig=Variance(RVar)
cov=(sqrt(sig))/expect
cov=simplify(cov)
if cache==True:
RVar.add_to_cache('cov',cov)
return cov
def ExpectedValue(RVar,gX=x):
"""
Procedure Name: ExpectedValue
Purpose: Computes the expected value of X
Arguments: 1. RVar: A random variable
2. gX: A transformation of x
Output: 1. E(gX)
"""
# If the input is a list of data, compute the Expected Value
# for the data set
if type(RVar)==list:
Xstar=BootstrapRV(RVar)
return ExpectedValue(Xstar,gX)
# Convert the random variable to its PDF form
fx=PDF(RVar)
# If the distribution is continuous, compute the expected
# value
if fx.ftype[0]=='continuous':
Expect=0
for i in range(len(fx.func)):
Expect+=integrate(gX*fx.func[i],
(x,fx.support[i],fx.support[i+1]))
return simplify(Expect)
# If the distribution is a discrete function, compute the expected
# value
if fx.ftype[0]=='Discrete':
Expect=0
for i in range(len(fx.func)):
Expect+=summation(gX*fx.func[i],
(x,fx.support[i],fx.support[i+1]))
return simplify(Expect)
# If the distribution is discrete, compute the expected
# value
if fx.ftype[0]=='discrete':
# Transform the random variable, and then use the
# mean procedure to find the expected value
fx_support = [gX.subs(x,value) for value in fx.support]
fx_trans = RV(fx.func,fx_support,fx.ftype)
#fx_trans=Transform(fx,[[gX],[-oo,oo]])
Expect=MeanDiscrete(fx_trans)
return simplify(Expect)
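# Illustrative usage sketch: ExpectedValue integrates (or sums) gX*f(x) over
# the support of the random variable, e.g. for the uniform RV on [0,2]:
#   ExpectedValue(RV([Rational(1,2)], [0, 2], ['continuous', 'pdf']), x**2)
#   # -> 4/3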
def Entropy(RVar,cache=False):
"""
Procedure Name: Entropy
    Purpose: Compute the entropy of a random variable
Arguments: 1. RVar: A random variable
Output: 1. The entropy of a random variable
"""
# If the input is a list of data, compute the entropy
# for the data set
if type(RVar)==list:
Xstar=BootstrapRV(RVar)
return Entropy(Xstar)
# If the entropy of the random variable is already cached in memory,
    # retrieve the value of the entropy and return it.
if RVar.cache != None and 'entropy' in RVar.cache:
return RVar.cache['entropy']
entropy=ExpectedValue(RVar,log(x,2))
entropy=simplify(entropy)
if cache==True:
RVar.add_to_cache('entropy',entropy)
return simplify(entropy)
def Kurtosis(RVar,cache=False):
"""
Procedure Name: Kurtosis
Purpose: Compute the Kurtosis of a random variable
Arguments: 1. RVar: A random variable
Output: 1. The kurtosis of a random variable
"""
# If the input is a list of data, compute the kurtosis
# for the data set
if type(RVar)==list:
Xstar=BootstrapRV(RVar)
return Kurtosis(Xstar)
# If the kurtosis of the random variable is already cached in memory,
    # retrieve the value of the kurtosis and return it.
if RVar.cache != None and 'kurtosis' in RVar.cache:
return RVar.cache['kurtosis']
# Compute the kurtosis
expect=Mean(RVar)
sig=sqrt(Variance(RVar))
Term1=ExpectedValue(RVar,x**4)
Term2=4*expect*ExpectedValue(RVar,x**3)
Term3=6*(expect**2)*ExpectedValue(RVar,x**2)
Term4=3*expect**4
kurt=(Term1-Term2+Term3-Term4)/(sig**4)
kurt=simplify(kurt)
if cache==True:
RVar.add_to_cache('kurtosis',kurt)
return simplify(kurt)
def MaximumIID(RVar,n=Symbol('n')):
"""
Procedure Name: MaximumIID
Purpose: Compute the maximum of n iid random variables
Arguments: 1. RVar: A random variable
2. n: an integer
Output: 1. The maximum of n iid random variables
"""
# Check to make sure n is an integer
if type(n)!=int:
if n.__class__.__name__!='Symbol':
raise RVError('The second argument must be an integer')
# If n is symbolic, find and return the maximum using
# OrderStat (may need to test and see if this is more
# efficient than using the for loop for non symbolic parameters)
if n.__class__.__name__=='Symbol':
return OrderStat(RVar,n,n)
# Compute the iid maximum
else:
X_dummy=RVar
X_final=X_dummy
for i in range(n-1):
X_final=Maximum(X_final,X_dummy)
return PDF(X_final)
def Mean(RVar,cache=False):
"""
Procedure Name: Mean
Purpose: Compute the mean of a random variable
Arguments: 1. RVar: A random variable
Output: 1. The mean of a random variable
"""
# If the input is a list of data, compute the mean
# for the data set
if type(RVar)==list:
Xstar=BootstrapRV(RVar)
return Mean(Xstar)
# If the mean of the random variable is already cached in memory,
    # retrieve the value of the mean and return it.
if RVar.cache != None and 'mean' in RVar.cache:
return RVar.cache['mean']
# Find the PDF of the random variable
# If the random variable is continuous, find and return the mean
X_dummy=PDF(RVar)
if X_dummy.ftype[0]=='continuous':
# Create list of x*f(x)
meanfunc=[]
for i in range(len(X_dummy.func)):
meanfunc.append(x*X_dummy.func[i])
# Integrate to find the mean
meanval=0
for i in range(len(X_dummy.func)):
val=integrate(meanfunc[i],(x,X_dummy.support[i],
X_dummy.support[i+1]))
meanval+=val
meanval=simplify(meanval)
if cache==True:
RVar.add_to_cache('mean',meanval)
return simplify(meanval)
# If the random variable is a discrete function, find and return the mean
if X_dummy.ftype[0]=='Discrete':
# Create list of x*f(x)
meanfunc=[]
for i in range(len(X_dummy.func)):
meanfunc.append(x*X_dummy.func[i])
# Sum to find the mean
meanval=0
for i in range(len(X_dummy.func)):
val=Sum(meanfunc[i],(x,X_dummy.support[i],
X_dummy.support[i+1])).doit()
meanval+=val
meanval=simplify(meanval)
if cache==True:
RVar.add_to_cache('mean',meanval)
return simplify(meanval)
    # If the random variable is discrete, find and return the mean
if X_dummy.ftype[0]=='discrete':
meanval=MeanDiscrete(RVar)
if cache==True:
RVar.add_to_cache('mean',meanval)
return simplify(meanval)
#
# Legacy mean code ... update uses faster numpy implementation
#
# Create a list of x*f(x)
#meanlist=[]
#for i in range(len(X_dummy.func)):
# meanlist.append(X_dummy.func[i]*X_dummy.support[i])
# Sum to find the mean
#meanval=0
#for i in range(len(meanlist)):
# meanval+=meanlist[i]
#return simplify(meanval)
def MeanDiscrete(RVar):
"""
Procedure Name: MeanDiscrete
Purpose: Compute the mean of a discrete random variable
Arguments: 1. RVar: A discrete random variable
Output: 1. The mean of the random variable
"""
# Check the random variable to make sure it is discrete
if RVar.ftype[0]=='continuous':
        raise RVError('the random variable must be discrete')
elif RVar.ftype[0]=='Discrete':
try:
RVar=Convert(RVar)
except:
err_string='the support of the random variable'
err_string+=' must be finite'
raise RVError(err_string)
# Convert the random variable to PDF form
X_dummy=PDF(RVar)
# Convert the value and the support of the pdf to numpy
# matrices
support=np.matrix(X_dummy.support)
pdf=np.matrix(X_dummy.func)
# Use the numpy element wise multiplication function to
# determine a vector of the values of f(x)*x
vals=np.multiply(support,pdf)
# Sum the values of f(x)*x to find the mean
meanval=vals.sum()
return meanval
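# Illustrative usage sketch: Mean dispatches on the RV type; for an explicit
# discrete RV it calls MeanDiscrete, which uses numpy elementwise products:
#   MeanDiscrete(RV([Rational(1,4), Rational(1,2), Rational(1,4)],
#                   [1, 2, 5], ['discrete', 'pdf']))    # -> 5/2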
def MGF(RVar,cache=False):
"""
Procedure Name: MGF
Purpose: Compute the moment generating function of a random variable
Arguments: 1. RVar: A random variable
Output: 1. The moment generating function
"""
# If the MGF of the random variable is already cached in memory,
    # retrieve the value of the MGF and return it.
if RVar.cache != None and 'mgf' in RVar.cache:
return RVar.cache['mgf']
mgf=ExpectedValue(RVar,exp(t*x))
mgf=simplify(mgf)
if cache==True:
RVar.add_to_cache('mgf',mgf)
return mgf
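# Illustrative usage sketch: the MGF is E[exp(t*x)]; for the uniform RV on
# [0,2] this works out to (exp(2*t) - 1)/(2*t), possibly wrapped in a
# Piecewise by sympy to cover the t = 0 case.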
def MinimumIID(RVar,n):
"""
Procedure Name: MinimumIID
Purpose: Compute the minimum of n iid random variables
Arguments: 1. RVar: A random variable
2. n: an integer
Output: 1. The minimum of n iid random variables
"""
# Check to make sure n is an integer
if type(n)!=int:
if n.__class__.__name__!='Symbol':
raise RVError('The second argument must be an integer')
    # If n is symbolic, find and return the minimum using
# OrderStat (may need to test and see if this is more
# efficient than using the for loop for non symbolic parameters)
if n.__class__.__name__=='Symbol':
return OrderStat(RVar,1,n)
# Compute the iid minimum
else:
X_dummy=RVar
X_final=X_dummy
for i in range(n-1):
X_final=Minimum(X_final,X_dummy)
return PDF(X_final)
def NextCombination(Previous,N):
"""
Procedure Name: NextCombination
Purpose: Generates the next lexicographical combination of
size n. Designed for use in the OrderStat
procedure.
Arguments: 1. Previous: A list
2. N: A positive integer
Output: 1. The next combination
"""
# Initialize the Next list
Next=[]
for i in range(len(Previous)):
Next.append(Previous[i])
n=len(Next)
# If the value in the final position of the combination is not the
# maximum value it can attain, N, then increment it by 1
if Next[n-1]!=N:
Next[n-1]+=1
# If the final position in the combination is already at its maximum
    # value, then move left through the combination and find the next
# possible value that can be incremented
else:
MoveLeft=True
for i in reversed(range(1,n)):
indx=i-1
if Next[indx]<N+i-n:
Next[indx]+=1
for j in range(1,(n-i+1)):
Next[indx+j]=Next[(indx+j)-1]+1
MoveLeft=False
if MoveLeft==False:
break
return(Next)
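# Illustrative usage sketch of the lexicographic stepping used by OrderStat:
#   NextCombination([1, 2, 3], 5)   # -> [1, 2, 4]
#   NextCombination([1, 4, 5], 5)   # -> [2, 3, 4]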
def NextPermutation(Previous):
"""
Procedure Name: NextPermutation
Purpose: Generate the next lexicographical permutation of
the given list. Designed for use in the OrderStat
procedure.
Arguments: 1. Previous: A list
Output: 1. The next permutation
"""
# Initialize the Next list
Next=[]
Temp2=[]
for i in range(len(Previous)):
Next.append(Previous[i])
Temp2.append(None)
n=len(Previous)
flag=False
# Find the largest index value i for which Next[i]<Next[i+1]
for i in reversed(range(1,n)):
        if flag==False:
indx=i-1
if Next[indx]<Next[indx+1]:
flag=True
OrigVal=Next[indx]
SwapIndex=indx+1
# Find the smallest value Next[j] for which Next[i]<Next[j]
# and i<j
for j in reversed(range(SwapIndex,n)):
if Next[j]<Next[SwapIndex]:
if Next[j]>OrigVal:
SwapIndex=j
Temp1=Next[SwapIndex]
Swap=Next[indx]
Next[SwapIndex]=Swap
Next[indx]=Temp1
# Reverse the order of the values to the right of the leftmost
# swapped value
for k in range(indx+1,n):
Temp2[k]=Next[k]
for m in range(indx+1,n):
Next[m]=Temp2[n+indx-m]
return(Next)
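# Illustrative usage sketch:
#   NextPermutation([1, 3, 2])   # -> [2, 1, 3]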
def OrderStat(RVar,n,r,replace='w'):
"""
Procedure Name: OrderStat
Purpose: Compute the distribution of the rth order statistic
             from a sample population of n
Arguments: 1. RVar: A random variable
2. n: The number of items randomly drawn from the rv
3. r: The index of the order statistic
Output: 1. The desired r out of n OrderStatistic
"""
if r.__class__.__name__!='Symbol' and n.__class__.__name__!='Symbol':
if r>n:
raise RVError('The index cannot be greater than the sample size')
if replace not in ['w','wo']:
raise RVError('Replace must be w or wo')
# If the distribution is continuous, find and return the value of the
# order statistic
if RVar.ftype[0]=='continuous':
if replace == 'wo':
err_string = 'OrderStat without replacement not implemented '
err_string += 'for continuous random variables'
raise RVError(err_string)
# Compute the PDF, CDF and SF of the random variable
pdf_dummy=PDF(RVar)
cdf_dummy=CDF(RVar)
sf_dummy=SF(RVar)
# Compute the factorial constant
const=(factorial(n))/(factorial(r-1)*factorial(n-r))
# Compute the distribution of the order statistic for each
# segment
ordstat_func=[]
for i in range(len(RVar.func)):
fx=pdf_dummy.func[i]
Fx=cdf_dummy.func[i]
Sx=sf_dummy.func[i]
ordfunc=const*(Fx**(r-1))*(Sx**(n-r))*fx
ordstat_func.append(simplify(ordfunc))
# Return the distribution of the order statistic
return RV(ordstat_func,RVar.support,['continuous','pdf'])
# If the distribution is in discrete symbolic form, convert it to
# discrete explicit form and find the order statistic
if RVar.ftype[0]=='Discrete':
if (-oo not in RVar.support) and (oo not in RVar.support):
X_dummy = Convert(RVar)
return OrderStat(X_dummy,n,r,replace)
else:
err_string = 'OrderStat is not currently implemented for '
err_string += 'discrete RVs with infinite support'
raise RVError(err_string)
# If the distribution is continuous, find and return the value of
# the order statistic
if RVar.ftype[0]=='discrete':
fx=PDF(RVar)
Fx=CDF(RVar)
Sx=SF(RVar)
N=len(fx.support)
# With replacement
if replace=='w':
# Numeric PDF
if type(RVar.func[0])!=Symbol:
# If N is one, return the order stat
if N==1:
return RV(1,RVar.support,['discrete','pdf'])
# Add the first term
else:
OSproblist=[]
os_sum=0
for w in range(n-r+1):
val=(binomial(n,w)*
(fx.func[0]**(n-w))*
(Sx.func[1]**(w)))
os_sum+=val
OSproblist.append(os_sum)
# Add term 2 through N-1
for k in range(2,N):
os_sum=0
for w in range(n-r+1):
for u in range(r):
val=(factorial(n)/
(factorial(u)*factorial(n-u-w)
*factorial(w))*
(Fx.func[k-2]**u)*
(fx.func[k-1]**(n-u-w))*
(Sx.func[k]**(w)))
os_sum+=val
OSproblist.append(os_sum)
# Add term N
os_sum=0
for u in range(r):
val=(binomial(n,u)*
(Fx.func[N-2]**u)*
(fx.func[N-1]**(n-u)))
os_sum+=val
OSproblist.append(os_sum)
return RV(OSproblist,RVar.support,['discrete','pdf'])
if replace=='wo':
'''
if n>4:
err_string = 'When sampling without replacement, n must be '
err_string += 'less than 4'
raise RVError(err_string)
'''
# Determine if the PDF has equally likely probabilities
EqLike=True
for i in range(len(fx.func)):
if fx.func[0]!=fx.func[i]:
EqLike=False
if EqLike==False:
break
# Create blank order stat function list
fxOS=[]
for i in range(len(fx.func)):
fxOS.append(0)
# If the probabilities are equally likely
if EqLike==True:
# Need to add algorithm for symbolic 'r'
for i in range(r,(N-n+r+1)):
indx=i-1
val=((binomial(i-1,r-1)*
binomial(1,1)*
binomial(N-i,n-r))/
(binomial(N,n)))
fxOS[indx]=val
return RV(fxOS,fx.support,['discrete','pdf'])
# If the probabilities are not equally likely
elif EqLike==False:
# If the sample size is 1
if n==1:
fxOS=[]
for i in range(len(fx.func)):
fxOS.append(fx.func[i])
                    return RV(fxOS,fx.support,['discrete','pdf'])
elif n==N:
fxOS[n-1]=1
return RV(fxOS,fx.support,['discrete','pdf'])
else:
# Create null ProbStorage array of size nXN
# Initialize to contain all zeroes
ProbStorage=[]
for i in range(n):
row_list=[]
for j in range(N):
row_list.append(0)
ProbStorage.append(row_list)
# Create the first lexicographical combo of
# n items
combo=range(1,n+1)
for i in range(1,(binomial(N,n)+1)):
# Assign perm as the current combo
perm=[]
for j in range(len(combo)):
perm.append(combo[j])
# Compute the probability of obtaining the
# given permutation
for j in range(1,factorial(n)+1):
PermProb=fx.func[perm[0]]
cumsum=fx.func[perm[0]]
for m in range(1,n):
PermProb*=fx.func[perm[m]]/(1-cumsum)
cumsum+=fx.func[perm[m]]
# Order each permutation and determine
# which value sits in the rth
# ordered position
orderedperm=[]
for m in range(len(perm)):
orderedperm.append(perm[m])
orderedperm.sort()
for m in range(n):
for k in range(N):
if orderedperm[m]==k+1:
ProbStorage[m][k]=(PermProb+
ProbStorage[m][k])
# Find the next lexicographical permutation
perm=NextPermutation(perm)
# Find the next lexicographical combination
combo=NextCombination(combo,N)
def Pow(RVar,n):
"""
Procedure Name: Pow
Purpose: Compute the transformation of a random variable by an exponent
Arguments: 1. RVar: A random variable
2. n: an integer
Output: 1. The transformation of the RV by x**n
"""
if type(n) != int:
err_str = 'n must be an integer'
raise RVError(err_str)
# If n is even, then g is a two-to-one transformation
if n%2 == 0:
g=[[x**n,x**n],[-oo,0,oo]]
# If n is odd, the g is a one-to-one transformation
elif n%2 == 1:
g=[[x**n],[-oo,oo]]
return Transform(RVar,g)
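# Note: Pow delegates to Transform, using the two-to-one transformation
# [[x**n, x**n], [-oo, 0, oo]] for even n and the one-to-one transformation
# [[x**n], [-oo, oo]] for odd n.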
def ProductIID(RVar,n):
"""
Procedure Name: ProductIID
Purpose: Compute the product of n iid random variables
Arguments: 1. RVar: A random variable
2. n: an integer
Output: 1. The product of n iid random variables
"""
# Check to make sure n is an integer
if type(n)!=int:
raise RVError('The second argument must be an integer')
# Compute the iid convolution
X_dummy=PDF(RVar)
X_final=X_dummy
for i in range(n-1):
X_final*=X_dummy
return PDF(X_final)
def RangeStat(RVar,n,replace='w'):
"""
Procedure Name: RangeStat
Purpose: Compute the distribution of the range of n iid rvs
Arguments: 1. RVar: A random variable
2. n: an integer
3. replace: indicates with or without replacment
Output: 1. The dist of the range of n iid random variables
"""
# Check to make sure that n >= 2, otherwise there is no range
if n<2:
err_string = 'Only one item sampled from the population'
raise RVError(err_string)
if replace not in ['w','wo']:
raise RVError('Replace must be w or wo')
# Convert the random variable to its PDF form
fX = PDF(RVar)
# If the random variable is continuous and its CDF is tractable,
# find the PDF of the range statistic
z = Symbol('z')
if fX.ftype[0] == 'continuous':
if replace == 'wo':
            err_string = 'RangeStat without replacement not implemented '
err_string += 'for continuous random variables'
raise RVError(err_string)
FX = CDF(RVar)
nsegs = len(FX.func)
fXRange = []
for i in range(nsegs):
ffX = integrate( n*(n-1)*
(FX.func[i].subs(x,z) -
FX.func[i].subs(x,z-x))**(n-2)
*fX.func[i].subs(x,z-x)
*fX.func[i].subs(x,z),(z,x,fX.support[i+1]))
fXRange.append(ffX)
RangeRV = RV(fXRange,fX.support,fX.ftype)
return RangeRV
# If the random variable is discrete symbolic, convert it to discrete
# explicit and compute the range statistic
if fX.ftype[0] == 'Discrete':
if (-oo not in fX.support) and (oo not in fX.support):
X_dummy = Convert(RVar)
return RangeStat(X_dummy,n,replace)
# If the reandom variable is discrete explicit, find and return the
# range stat
if fX.ftype[0] == 'discrete':
fX = PDF(RVar)
FX = CDF(RVar)
N = len(fX.support)
if N < 2:
err_string = 'The population only consists of 1 element'
raise RVError(err_string)
if replace == 'w':
s = fX.support
p = fX.func
k = 0
# rs is an array that holds the range support values
# rp is an array that holds the range probability mass values
# There are 1 + 2 + 3 + ... + N possible range support values
# if the support is of size N. 'uppers' is this limit
uppers = sum(range(1,N+1))
rs = [0 for i in range(N**2)]
rp = [0 for i in range(N**2)]
for i in range(N):
for j in range(N):
rs[k] = s[j] - s[i]
rp[k] = (sum(p[i:j+1])**n -
sum(p[i+1:j+1])**n -
sum(p[i:j])**n+
sum(p[i+1:j])**n)
k+=1
# Sort rs and rp together by rs
sortedr = zip(*sorted(zip(rs,rp)))
sortrs = list(sortedr[0])
sortrp = list(sortedr[1])
# Combine redundant elements in the list
sortrs2=[]
sortrp2=[]
for i in range(len(sortrs)):
if sortrs[i] not in sortrs2:
if sortrp[i] > 0:
sortrs2.append(sortrs[i])
sortrp2.append(sortrp[i])
elif sortrs[i] in sortrs2:
idx=sortrs2.index(sortrs[i])
sortrp2[idx]+=sortrp[i]
return RV(sortrp2,sortrs2,['discrete','pdf'])
if replace == 'wo':
            err_string = 'RangeStat is not currently implemented without '
err_string += 'replacement'
raise RVError(err_string)
if n==N:
fXRange = [1]
fXSupport = [N-1]
else:
fXRange = [0 for i in range(N)]
fXSupport = [value for value in fX.support]
# Create the first lexicographical combo of n items
combo = [value for value in range(1,n+1)]
for i in range(binomial(N,n)):
# Assign perm as the current combo
perm = [elem for elem in combo]
# Compute the probability of obtaining the permutation
for j in range(factorial(n)):
PermProb = fX.func[perm[0]]
cumsum = fX.func[perm[0]]
for m in range(1,n):
PermProb *= fX.func[perm[m]]/(1-cumsum)
cumsum += fX.func[perm[m]]
# Find the maximum and minimum elements of the
# permutation and then determine their difference
HiVal = max(perm)
LoVal = min(perm)
Range = HiVal - LoVal
flag = True
for k in range(N-1):
if Range == k+1:
fXRange[k] += PermProb
# Find the next lexicographical permutation
perm = NextPermutation(perm)
combo = NextCombination(combo,N)
return RV(fXRange,fXSupport,fX.ftype)
def Skewness(RVar,cache=False):
"""
Procedure Name: Skewness
Purpose: Compute the skewness of a random variable
Arguments: 1. RVar: A random variable
Output: 1. The skewness of the random variable
"""
# If the input is a list of data, compute the Skewness
# for the data set
if type(RVar)==list:
Xstar=BootstrapRV(RVar)
return Skewness(Xstar)
# If the skewness of the random variable is already cached in memory,
    # retrieve the value of the skewness and return it.
if RVar.cache != None and 'skewness' in RVar.cache:
return RVar.cache['skewness']
# Compute the skewness
expect=Mean(RVar)
sig=sqrt(Variance(RVar))
Term1=ExpectedValue(RVar,x**3)
Term2=3*expect*ExpectedValue(RVar,x**2)
Term3=2*expect**3
skew=(Term1-Term2+Term3)/(sig**3)
skew=simplify(skew)
if cache==True:
RVar.add_to_cache('skewness',skew)
return simplify(skew)
def Sqrt(RVar):
"""
Procedure Name: Sqrt
Purpose: Computes the transformation of a random variable by sqrt(x)
Arguments: 1. RVar: A random variable
Output: 1. The random variable transformed by sqrt(x)
"""
for element in RVar.support:
if element < 0:
err_string = 'A negative value appears in the support of the'
err_string += ' random variable.'
raise RVError(err_string)
u=[[sqrt(x)],[0,oo]]
NewRvar=Transform(RVar,u)
return NewRvar
def Transform(RVar,gXt):
"""
Procedure Name: Transform
Purpose: Compute the transformation of a random variable
             by a function g(x)
Arguments: 1. RVar: A random variable
2. gX: A transformation in list of two lists format
Output: 1. The transformation of RVar
"""
# Check to make sure support of transform is in ascending order
for i in range(len(gXt[1])-1):
if gXt[1][i]>gXt[1][i+1]:
raise RVError('Transform support is not in ascending order')
# Convert the RV to its PDF form
X_dummy=PDF(RVar)
# If the distribution is continuous, find and return the transformation
if RVar.ftype[0]=='continuous':
# Adjust the transformation to include the support of the random
# variable
gXold=[]
for i in range(len(gXt)):
gXold.append(gXt[i])
gXsupp=[]
for i in range(len(gXold[1])):
gXsupp.append(gXold[1][i])
# Add the support of the random variable into the support
# of the transformation
for i in range(len(X_dummy.support)):
if X_dummy.support[i] not in gXsupp:
gXsupp.append(X_dummy.support[i])
gXsupp.sort()
# Find which segment of the transformation applies, and add it
# to the transformation list
gXfunc=[]
for i in range(1,len(gXsupp)):
for j in range(len(gXold[0])):
if gXsupp[i]>=gXold[1][j]:
if gXsupp[i]<=gXold[1][j+1]:
gXfunc.append(gXold[0][j])
break
# Set the adjusted transformation as gX
gX=[]
gX.append(gXfunc)
gX.append(gXsupp)
# If the support of the transformation does not match up with the
# support of the RV, adjust the support of the transformation
# Traverse list to find elements that are not within the support
# of the rv
for i in range(len(gX[1])):
if gX[1][i]<X_dummy.support[0]:
gX[1][i]=X_dummy.support[0]
if gX[1][i]>X_dummy.support[len(X_dummy.support)-1]:
gX[1][i]=X_dummy.support[len(X_dummy.support)-1]
# Delete segments of the transformation that will not be used
gX0_removal = []
gX1_removal = []
for i in range(len(gX[0])-1):
if gX[1][i]==gX[1][i+1]:
gX0_removal.append(i)
gX1_removal.append(i+1)
for i in range(len(gX0_removal)):
index = gX0_removal[i]
del gX[0][index-i]
for i in range(len(gX1_removal)):
index = gX1_removal[i]
del gX[1][index-i]
# Create a list of mappings x->g(x)
mapping=[]
for i in range(len(gX[0])):
gXsubs1 = gX[0][i].subs(x,gX[1][i])
if gXsubs1 == zoo:
gXsubs1 = limit(gX[0][i],x,gX[1][i])
gXsubs2 = gX[0][i].subs(x,gX[1][i+1])
if gXsubs2 == zoo:
gXsubs2 = limit(gX[0][i+1],x,gX[1][i+1])
mapping.append([gXsubs1, gXsubs2])
# Create the support for the transformed random variable
trans_supp=[]
for i in range(len(mapping)):
for j in range(2):
if mapping[i][j] not in trans_supp:
trans_supp.append(mapping[i][j])
if zoo in trans_supp:
error_string='complex infinity appears in the support, '
error_string+='please check for an undefined transformation '
error_string+='such as 1/0'
raise RVError(error_string)
trans_supp.sort()
# Find which segment of the transformation each transformation
# function applies to
applist=[]
for i in range(len(mapping)):
temp=[]
for j in range(len(trans_supp)-1):
if min(mapping[i])<=trans_supp[j]:
if max(mapping[i])>=trans_supp[j+1]:
temp.append(j)
applist.append(temp)
# Find the appropriate inverse for each g(x)
ginv=[]
for i in range(len(gX[0])):
# Find the 'test point' for the inverse
if [gX[1][i],gX[1][i+1]]==[-oo,oo]:
c=0
elif gX[1][i]==-oo and gX[1][i+1]!=oo:
c=gX[1][i+1]-1
elif gX[1][i]!=-oo and gX[1][i+1]==oo:
c=gX[1][i]+1
else:
c=(gX[1][i]+gX[1][i+1])/2
# Create a list of possible inverses
invlist=solve(gX[0][i]-t,x)
# Use the test point to determine the correct inverse
for j in range(len(invlist)):
# If g-1(g(c))=c, then the inverse is correct
test=invlist[j].subs(t,gX[0][i].subs(x,c))
#if test.__class__.__name__ != 'Mul':
try:
if test<=Float(float(c),10)+.0000001:
if test >= Float(float(c),10)-.0000001:
ginv.append(invlist[j])
except:
if j==len(invlist)-1 and len(ginv) < i+1:
ginv.append(None)
        # Find the transformation function for each segment
seg_func=[]
for i in range(len(X_dummy.func)):
# Only find transformation for applicable segments
for j in range(len(gX[0])):
if gX[1][j]>=X_dummy.support[i]:
if gX[1][j+1]<=X_dummy.support[i+1]:
#print X_dummy.func[i], ginv[j]
if type(X_dummy.func[i]) not in [float,int]:
tran=X_dummy.func[i].subs(x,ginv[j])
tran=tran*diff(ginv[j],t)
else:
tran=X_dummy.func[i]*diff(ginv[j],t)
seg_func.append(tran)
# Sum the transformations for each piece of the transformed
# random variable
trans_func=[]
for i in range(len(trans_supp)-1):
h=0
for j in range(len(seg_func)):
if i in applist[j]:
if mapping[j][0]<mapping[j][1]:
h=h+seg_func[j]
else:
h=h-seg_func[j]
trans_func.append(h)
# Substitute x into the transformed random variable
trans_func2=[]
for i in range(len(trans_func)):
if type(trans_func[i]) not in [int,float]:
trans_func2.append(simplify(trans_func[i].subs(t,x)))
else:
trans_func2.append(trans_func[i])
# Create and return the random variable
return RV(trans_func2,trans_supp,['continuous','pdf'])
# If the distribution in symbolic discrete, convert it and then compute
# the transformation
if RVar.ftype[0]=='Discrete':
for element in RVar.support:
if (element in [-oo,oo]) or (element.__class__.__name__=='Symbol'):
err_string = 'Transform is not implemented for discrete '
                err_string += 'random variables with symbolic or infinite '
err_string += 'support'
raise RVError(err_string)
X_dummy = Convert(RVar)
return Transform(X_dummy,gXt)
# If the distribution is discrete, find and return the transformation
if RVar.ftype[0]=='discrete':
gX=gXt
trans_sup=[]
# Find the portion of the transformation each element
# in the random variable applies to, and then transform it
for i in range(len(X_dummy.support)):
X_support=X_dummy.support[i]
if X_support < min(gX[1]) or X_support > max(gX[1]):
trans_sup.append(X_support)
for j in range(len(gX[1])-1):
if X_support>=gX[1][j] and X_support<=gX[1][j+1]:
trans_sup.append(gX[0][j].subs(x,X_dummy.support[i]))
break
# Break is required, otherwise points on the boundaries
# between two segments of the transformation will
# be entered twice
# Sort the function and support lists
sortlist=zip(trans_sup,X_dummy.func)
sortlist.sort()
translist=[]
funclist=[]
for i in range(len(sortlist)):
translist.append(sortlist[i][0])
funclist.append(sortlist[i][1])
# Combine redundant elements in the list
translist2=[]
funclist2=[]
for i in range(len(translist)):
if translist[i] not in translist2:
translist2.append(translist[i])
funclist2.append(funclist[i])
elif translist[i] in translist2:
idx=translist2.index(translist[i])
funclist2[idx]+=funclist[i]
# Return the transformed random variable
return RV(funclist2,translist2,['discrete','pdf'])
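# Illustrative usage sketch: Transform applies a piecewise monotone g(x).
# Doubling the uniform RV on [0,2] yields a uniform RV on [0,4]:
#   Transform(RV([Rational(1,2)], [0, 2], ['continuous', 'pdf']),
#             [[2*x], [-oo, oo]])
#   # -> RV with func [1/4] and support [0, 4]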
def Truncate(RVar,supp):
"""
Procedure Name: Truncate
Purpose: Truncate a random variable
Arguments: 1. RVar: A random variable
2. supp: The support of the truncated random variable
Output: 1. A truncated random variable
"""
# Check to make sure the support of the truncated random
# variable is given in ascending order
if supp[0]>supp[1]:
raise RVError('The support must be given in ascending order')
    # Convert the random variable to its pdf form
X_dummy=PDF(RVar)
cdf_dummy=CDF(RVar)
# If the random variable is continuous, find and return
# the truncated random variable
if RVar.ftype[0]=='continuous':
# Find the area of the truncated random variable
area=CDF(cdf_dummy,supp[1])-CDF(cdf_dummy,supp[0])
#area=0
#for i in range(len(X_dummy.func)):
# val=integrate(X_dummy.func[i],(x,X_dummy.support[i],
# X_dummy.support[i+1]))
# area+=val
#print area
# Cut out parts of the distribution that don't fall
# within the new limits
for i in range(len(X_dummy.func)):
if supp[0]>=X_dummy.support[i]:
if supp[0]<=X_dummy.support[i+1]:
lwindx=i
if supp[1]>=X_dummy.support[i]:
if supp[1]<=X_dummy.support[i+1]:
upindx=i
truncfunc=[]
for i in range(len(X_dummy.func)):
if i>=lwindx and i<=upindx:
truncfunc.append(simplify(X_dummy.func[i]/area))
truncsupp=[supp[0]]
upindx+=1
for i in range(len(X_dummy.support)):
if i>lwindx and i<upindx:
truncsupp.append(X_dummy.support[i])
truncsupp.append(supp[1])
# Return the truncated random variable
return RV(truncfunc,truncsupp,['continuous','pdf'])
# If the random variable is a discrete function, find and return
# the truncated random variable
if RVar.ftype[0]=='Discrete':
# Find the area of the truncated random variable
area=CDF(cdf_dummy,supp[1])-CDF(cdf_dummy,supp[0])
# Cut out parts of the distribution that don't fall
# within the new limits
for i in range(len(X_dummy.func)):
if supp[0]>=X_dummy.support[i]:
if supp[0]<=X_dummy.support[i+1]:
lwindx=i
if supp[1]>=X_dummy.support[i]:
if supp[1]<=X_dummy.support[i+1]:
upindx=i
truncfunc=[]
for i in range(len(X_dummy.func)):
if i>=lwindx and i<=upindx:
truncfunc.append(X_dummy.func[i]/area)
truncsupp=[supp[0]]
upindx+=1
for i in range(len(X_dummy.support)):
if i>lwindx and i<upindx:
truncsupp.append(X_dummy.support[i])
truncsupp.append(supp[1])
# Return the truncated random variable
return RV(truncfunc,truncsupp,['Discrete','pdf'])
# If the distribution is discrete, find and return the
# truncated random variable
if RVar.ftype[0]=='discrete':
# Find the area of the truncated random variable
area=0
for i in range(len(X_dummy.support)):
if X_dummy.support[i]>=supp[0]:
if X_dummy.support[i]<=supp[1]:
area+=X_dummy.func[i]
# Truncate the random variable and find the probability
# at each point
truncfunc=[]
truncsupp=[]
for i in range(len(X_dummy.support)):
if X_dummy.support[i]>=supp[0]:
if X_dummy.support[i]<=supp[1]:
truncfunc.append(X_dummy.func[i]/area)
truncsupp.append(X_dummy.support[i])
# Return the truncated random variable
return RV(truncfunc,truncsupp,['discrete','pdf'])
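# Illustrative usage sketch: Truncate rescales the density over the restricted
# support, e.g. truncating the uniform RV on [0,2] to [0,1]:
#   Truncate(RV([Rational(1,2)], [0, 2], ['continuous', 'pdf']), [0, 1])
#   # -> RV with func [1] and support [0, 1]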
def Variance(RVar,cache=False):
"""
Procedure Name: Variance
Purpose: Compute the variance of a random variable
Arguments: 1. RVar: A random variable
Output: 1. The variance of a random variable
"""
# If the input is a list of data, compute the variance
# for the data set
if type(RVar)==list:
Xstar=BootstrapRV(RVar)
return Variance(Xstar)
# If the variance of the random variable is already cached in memory,
    # retrieve the value of the variance and return it.
if RVar.cache != None and 'variance' in RVar.cache:
return RVar.cache['variance']
# Find the PDF of the random variable
X_dummy=PDF(RVar)
# If the random variable is continuous, find and return the variance
if X_dummy.ftype[0]=='continuous':
# Find the mean of the random variable
EX=Mean(X_dummy)
# Find E(X^2)
# Create list of (x**2)*f(x)
varfunc=[]
for i in range(len(X_dummy.func)):
varfunc.append((x**2)*X_dummy.func[i])
# Integrate to find E(X^2)
exxval=0
for i in range(len(X_dummy.func)):
val=integrate(varfunc[i],(x,X_dummy.support[i],
X_dummy.support[i+1]))
exxval+=val
# Find Var(X)=E(X^2)-E(X)^2
var=exxval-(EX**2)
var=simplify(var)
if cache==True:
RVar.add_to_cache('variance',var)
return simplify(var)
# If the random variable is a discrete function, find and return
# the variance
if X_dummy.ftype[0]=='Discrete':
# Find the mean of the random variable
EX=Mean(X_dummy)
# Find E(X^2)
# Create list of (x**2)*f(x)
varfunc=[]
for i in range(len(X_dummy.func)):
varfunc.append((x**2)*X_dummy.func[i])
# Sum to find E(X^2)
exxval=0
for i in range(len(X_dummy.func)):
val=summation(varfunc[i],(x,X_dummy.support[i],
X_dummy.support[i+1]))
exxval+=val
# Find Var(X)=E(X^2)-E(X)^2
var=exxval-(EX**2)
var=simplify(var)
if cache==True:
RVar.add_to_cache('variance',var)
return simplify(var)
# If the random variable is discrete, find and return the variance
if X_dummy.ftype[0]=='discrete':
var=VarDiscrete(RVar)
if cache==True:
RVar.add_to_cache('variance',var)
return simplify(var)
#
# Legacy variance code ... update uses faster numpy implementation
#
# Find the mean of the random variable
#EX=Mean(X_dummy)
# Find E(X^2)
# Create a list of (x**2)*f(x)
#exxlist=[]
#for i in range(len(X_dummy.func)):
# exxlist.append(X_dummy.func[i]*(X_dummy.support[i])**2)
# Sum to find E(X^2)
#exxval=0
#for i in range(len(exxlist)):
# exxval+=exxlist[i]
# Find Var(X)=E(X^2)-E(X)^2
#var=exxval-(EX**2)
#return simplify(var)
def VarDiscrete(RVar):
"""
Procedure Name: VarDiscrete
Purpose: Compute the variance of a discrete random variable
Arguments: 1. RVar: a discrete random variable
Output: 1. The variance of the random variable
"""
# Check the random variable to make sure it is discrete
if RVar.ftype[0]=='continuous':
        raise RVError('the random variable must be discrete')
elif RVar.ftype[0]=='Discrete':
try:
RVar=Convert(RVar)
except:
err_string='the support of the random variable'
err_string+=' must be finite'
raise RVError(err_string)
# Convert the random variable to PDF form
X_dummy=PDF(RVar)
    # Find the mean of the random variable
EX=MeanDiscrete(RVar)
# Convert the values and support of the random variable
# to vector form
support=np.matrix(RVar.support)
pdf=np.matrix(RVar.func)
# Find E(X^2) by creating a vector containing the values
# of f(x)*x**2 and summing the result
supportsqr=np.multiply(support,support)
EXXvals=np.multiply(supportsqr,pdf)
EXX=EXXvals.sum()
# Find Var(X)=E(X^2)-E(X)^2
var=EXX-(EX**2)
return var
def VerifyPDF(RVar):
"""
Procedure Name: VerifyPDF
Purpose: Calls self.verifyPDF(). For compatibility with
original APPL syntax
Arguments: 1. RVar: a discrete random variable
Output: 1. A function call to self.verifyPDF()
"""
return RVar.verifyPDF()
"""
Procedures on Two Random Variables
Procedures:
1. Convolution(RVar1,RVar2)
2. Maximum(RVar1,RVar2)
3. Minimum(RVar1,RVar2)
4. Mixture(MixParameters,MixRVs)
5. Product(RVar1,RVar2)
"""
def Convolution(RVar1,RVar2):
"""
Procedure Name: Convolution
Purpose: Compute the convolution of two independent
random variables
Arguments: 1. RVar1: A random variable
2. RVar2: A random variable
Output: 1. The convolution of RVar1 and RVar2
"""
# If the two random variables are not both continuous or
# both discrete, return an error
if RVar1.ftype[0]!=RVar2.ftype[0]:
discr=['discrete','Discrete']
        if (RVar1.ftype[0] not in discr) or (RVar2.ftype[0] not in discr):
raise RVError('Both random variables must have the same type')
# Convert both random variables to their PDF form
X1_dummy=PDF(RVar1)
X2_dummy=PDF(RVar2)
# If the distributions are continuous, find and return the convolution
# of the two random variables
if RVar1.ftype[0]=='continuous':
# X1_dummy.drop_assumptions()
#X2_dummy.drop_assumptions()
# If the two distributions are both lifetime distributions, treat
# as a special case
if RVar1.support==[0,oo] and RVar2.support==[0,oo]:
#x=Symbol('x',positive=True)
z=Symbol('z',positive=True)
func1=X1_dummy.func[0]
func2=X2_dummy.func[0].subs(x,z-x)
int_func=expand(func1*func2)
conv=integrate(int_func,(x,0,z),conds='none')
conv_final=conv.subs(z,x)
            conv_final=simplify(expand(conv_final))
            return RV([conv_final],[0,oo],['continuous','pdf'])
        # If both random variables are supported on [0,1], compute the
        # convolution directly over that support
        elif RVar1.support==[0,1] and RVar2.support==[0,1]:
z = Symbol('z', positive = True)
xx = Symbol('xx', positive = True)
func1 = X1_dummy.func[0].subs(x,xx)
func2 = X2_dummy.func[0].subs(x,z-xx)
fz1 = integrate(func1*func2, (xx,0,z))
fz1 = fz1.subs(z,x)
fz2 = integrate(func1*func2, (xx,z-1,1))
fz2 = fz2.subs(z,x)
return RV([fz1,fz2],[0,1,2],['continuous','pdf'])
        # Otherwise, compute the convolution using the product method
        else:
gln=[[ln(x)],[0,oo]]
ge=[[exp(x),exp(x)],[-oo,0,oo]]
temp1=Transform(X1_dummy,ge)
temp2=Transform(X2_dummy,ge)
temp3=Product(temp1,temp2)
fz=Transform(temp3,gln)
convfunc=[]
for i in range(len(fz.func)):
convfunc.append(simplify(fz.func[i]))
return RV(convfunc,fz.support,['continuous','pdf'])
    # If the two random variables are discrete in functional form,
# find and return the convolution of the two random variables
if RVar1.ftype[0]=='Discrete':
for num in RVar1.support:
if type(num) not in [int,float]:
err_string='Convolution does not currently work with'
                err_string+=' RVs that have symbolic or infinite support'
raise RVError(err_string)
RVar1=Convert(RVar1)
if RVar2.ftype[0]=='Discrete':
        for num in RVar2.support:
if type(num) not in [int,float]:
err_string='Convolution does not currently work with'
                err_string+=' RVs that have symbolic or infinite support'
raise RVError(err_string)
RVar2=Convert(RVar2)
# If the distributions are discrete, find and return the convolution
# of the two random variables.
if RVar1.ftype[0]=='discrete':
# Convert each random variable to its pdf form
X1_dummy=PDF(RVar1)
X2_dummy=PDF(RVar2)
# Create function and support lists for the convolution of the
# two random variables
convlist=[]
funclist=[]
for i in range(len(X1_dummy.support)):
for j in range(len(X2_dummy.support)):
convlist.append(X1_dummy.support[i]+X2_dummy.support[j])
funclist.append(X1_dummy.func[i]*X2_dummy.func[j])
# Sort the function and support lists for the convolution
sortlist=zip(convlist,funclist)
sortlist.sort()
convlist2=[]
funclist2=[]
for i in range(len(sortlist)):
convlist2.append(sortlist[i][0])
funclist2.append(sortlist[i][1])
# Remove redundant elements in the support list
convlist3=[]
funclist3=[]
for i in range(len(convlist2)):
if convlist2[i] not in convlist3:
convlist3.append(convlist2[i])
funclist3.append(funclist2[i])
else:
funclist3[convlist3.index(convlist2[i])]+=funclist2[i]
# Create and return the new random variable
return RV(funclist3,convlist3,['discrete','pdf'])
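# Illustrative usage sketch: for two independent uniform RVs on [0,1] the
# [0,1] special case above applies, and Convolution returns the triangular
# density with func [x, 2 - x] on support [0, 1, 2].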
def Maximum(*argv):
"""
Procedure Name: Maximum
Purpose: Compute the maximum of a list of random variables
    Arguments: 1. *argv: a series of random variables
Output: 1. The maximum distribution
"""
# Loop over the arguments and compute the distribution of the maximum
# of each argument
i=0
for rv in argv:
# For the first argument, create a temporary variable containing
# that rv
if i==0:
temp=rv
# For all others, find the maximum of the temporary variable and
# the rv
else:
temp=MaximumRV(temp,rv)
i+=1
return temp
def MaximumRV(RVar1,RVar2):
"""
Procedure Name: MaximumRV
    Purpose: Compute the distribution of the maximum of RVar1 and RVar2
Arguments: 1. RVar1: A random variable
2. RVar2: A random variable
Output: 1. The cdf of the maximum distribution
"""
# If the two random variables are not of the same type
# raise an error
if RVar1.ftype[0]!=RVar2.ftype[0]:
raise RVError('The RVs must both be discrete or continuous')
# If the distributions are continuous, find and return the max
if RVar1.ftype[0]=='continuous':
#X1_dummy.drop_assumptions()
#X2_dummy.drop_assumptions()
# Special case for lifetime distributions
if RVar1.support==[0,oo] and RVar2.support==[0,oo]:
cdf_dummy1=CDF(RVar1)
cdf_dummy2=CDF(RVar2)
cdf1=cdf_dummy1.func[0]
cdf2=cdf_dummy2.func[0]
maxfunc=cdf1*cdf2
return PDF(RV(simplify(maxfunc),[0,oo],['continuous','cdf']))
# Otherwise, compute the max using the full algorithm
# Set up the support for X
Fx=CDF(RVar1)
Fy=CDF(RVar2)
        # Create a support list for the maximum
max_supp=[]
for i in range(len(Fx.support)):
if Fx.support[i] not in max_supp:
max_supp.append(Fx.support[i])
for i in range(len(Fy.support)):
if Fy.support[i] not in max_supp:
max_supp.append(Fy.support[i])
max_supp.sort()
        # Remove any support values below the larger of the two lower endpoints
lowval=max(min(Fx.support),min(Fy.support))
max_supp2=[]
for i in range(len(max_supp)):
if max_supp[i]>=lowval:
max_supp2.append(max_supp[i])
# Compute the maximum function for each segment
max_func=[]
for i in range(len(max_supp2)-1):
value=max_supp2[i]
currFx=1
for j in range(len(Fx.func)):
if value>=Fx.support[j] and value<Fx.support[j+1]:
currFx=Fx.func[j]
break
currFy=1
for j in range(len(Fy.func)):
if value>=Fy.support[j] and value<Fy.support[j+1]:
currFy=Fy.func[j]
Fmax=currFx*currFy
max_func.append(simplify(Fmax))
return PDF(RV(max_func,max_supp2,['continuous','cdf']))
    # If the two random variables are discrete in functional form,
# find and return the maximum of the two random variables
if RVar1.ftype[0]=='Discrete':
for num in RVar1.support:
if type(num) not in [int,float]:
err_string='Maximum does not currently work with'
                err_string+=' RVs that have symbolic or infinite support'
raise RVError(err_string)
RVar1=Convert(RVar1)
if RVar2.ftype[0]=='Discrete':
        for num in RVar2.support:
if type(num) not in [int,float]:
err_string='Maximum does not currently work with'
                err_string+=' RVs that have symbolic or infinite support'
raise RVError(err_string)
RVar2=Convert(RVar2)
# If the distributions are discrete, find and return
# the maximum of the two rv's
if RVar1.ftype[0]=='discrete':
# Convert X and Y to their PDF representations
fx=PDF(RVar1)
fy=PDF(RVar2)
# Make a list of possible combinations of X and Y
combo_list=[]
prob_list=[]
for i in range(len(fx.support)):
for j in range(len(fy.support)):
combo_list.append([fx.support[i],fy.support[j]])
prob_list.append(fx.func[i]*fy.func[j])
# Old code for computing probability for each pair, had
# floating point issues, PDF wouldn't recognize a number
# as being in the support
#prob_list=[]
#for i in range(len(combo_list)):
# val=PDF(fx,combo_list[i][0])*PDF(fy,combo_list[j][1])
# prob_list.append(val)
# Find the max value for each combo
max_list=[]
for i in range(len(combo_list)):
max_list.append(max(combo_list[i][0],combo_list[i][1]))
# Compute the probability for each possible max
max_supp=[]
max_func=[]
for i in range(len(max_list)):
if max_list[i] not in max_supp:
max_supp.append(max_list[i])
max_func.append(prob_list[i])
else:
indx=max_supp.index(max_list[i])
max_func[indx]+=prob_list[i]
# Sort the elements of the rv
zip_list=zip(max_supp,max_func)
zip_list.sort()
max_supp=[]
max_func=[]
for i in range(len(zip_list)):
max_supp.append(zip_list[i][0])
max_func.append(zip_list[i][1])
# Return the minimum random variable
return PDF(RV(max_func,max_supp,['discrete','pdf']))
def Minimum(*argv):
"""
Procedure Name: Minimum
Purpose: Compute the minimum of a list of random variables
    Arguments: 1. *argv: a series of random variables
Output: 1. The minimum distribution
"""
    # Loop over the arguments and compute the distribution of the minimum
# of each argument
i=0
for rv in argv:
# For the first argument, create a temporary variable containing
# that rv
if i==0:
temp=rv
# For all others, find the minimum of the temporary variable and
# the rv
else:
temp=MinimumRV(temp,rv)
i+=1
return temp
def MinimumRV(RVar1,RVar2):
"""
Procedure Name: MinimumRV
Purpose: Compute the distribution of the minimum of RVar1 and RVar2
Arguments: 1. RVar1: A random variable
2. RVar2: A random variable
Output: 1. The minimum of the two random variables
"""
# If the two random variables are not of the same type
# raise an error
if RVar1.ftype[0]!=RVar2.ftype[0]:
raise RVError('The RVs must both be discrete or continuous')
# If the distributions are continuous, find and return the min
if RVar1.ftype[0]=='continuous':
#X1_dummy.drop_assumptions()
#X2_dummy.drop_assumptions()
# Special case for lifetime distributions
if RVar1.support==[0,oo] and RVar2.support==[0,oo]:
sf_dummy1=SF(RVar1)
sf_dummy2=SF(RVar2)
sf1=sf_dummy1.func[0]
sf2=sf_dummy2.func[0]
minfunc=1-(sf1*sf2)
return PDF(RV(simplify(minfunc),[0,oo],['continuous','cdf']))
# Otherwise, compute the min using the full algorithm
Fx=CDF(RVar1)
Fy=CDF(RVar2)
        # Create a support list for the minimum
min_supp=[]
for i in range(len(Fx.support)):
if Fx.support[i] not in min_supp:
min_supp.append(Fx.support[i])
for i in range(len(Fy.support)):
if Fy.support[i] not in min_supp:
min_supp.append(Fy.support[i])
min_supp.sort()
        # Remove any support values above the smaller of the two upper endpoints
highval=min(max(Fx.support),max(Fy.support))
min_supp2=[]
for i in range(len(min_supp)):
if min_supp[i]<=highval:
min_supp2.append(min_supp[i])
# Compute the minimum function for each segment
min_func=[]
for i in range(len(min_supp2)-1):
value=min_supp2[i]
currFx=0
for j in range(len(Fx.func)):
if value>=Fx.support[j] and value<=Fx.support[j+1]:
currFx=Fx.func[j]
break
currFy=0
for j in range(len(Fy.func)):
if value>=Fy.support[j] and value<=Fy.support[j+1]:
currFy=Fy.func[j]
Fmin=1-((1-currFx)*(1-currFy))
min_func.append(simplify(Fmin))
# Return the random variable
return PDF(RV(min_func,min_supp2,['continuous','cdf']))
    # If the two random variables are discrete in functional form,
# find and return the minimum of the two random variables
if RVar1.ftype[0]=='Discrete':
for num in RVar1.support:
if type(num) not in [int,float]:
err_string='Minimum does not currently work with'
err_string=' RVs that have symbolic or infinite support'
raise RVError(err_string)
RVar1=Convert(RVar1)
if RVar2.ftype[0]=='Discrete':
        for num in RVar2.support:
if type(num) not in [int,float]:
err_string='Minimum does not currently work with'
err_string=' RVs that have symbolic or infinite support'
raise RVError(err_string)
RVar2=Convert(RVar2)
# If the distributions are discrete, find and return
# the minimum of the two rv's
if RVar1.ftype[0]=='discrete':
# Convert X and Y to their PDF representations
fx=PDF(RVar1)
fy=PDF(RVar2)
# Make a list of possible combinations of X and Y
combo_list=[]
prob_list=[]
for i in range(len(fx.support)):
for j in range(len(fy.support)):
combo_list.append([fx.support[i],fy.support[j]])
prob_list.append(fx.func[i]*fy.func[j])
# Old code for computing probability for each pair, had
# floating point issues, PDF wouldn't recognize a number
# as being in the support
#prob_list=[]
#for i in range(len(combo_list)):
# val=PDF(fx,combo_list[i][0])*PDF(fy,combo_list[j][1])
# prob_list.append(val)
# Find the min value for each combo
min_list=[]
for i in range(len(combo_list)):
min_list.append(min(combo_list[i][0],combo_list[i][1]))
# Compute the probability for each possible min
min_supp=[]
min_func=[]
for i in range(len(min_list)):
if min_list[i] not in min_supp:
min_supp.append(min_list[i])
min_func.append(prob_list[i])
else:
indx=min_supp.index(min_list[i])
min_func[indx]+=prob_list[i]
# Sort the elements of the rv
zip_list=zip(min_supp,min_func)
zip_list.sort()
min_supp=[]
min_func=[]
for i in range(len(zip_list)):
min_supp.append(zip_list[i][0])
min_func.append(zip_list[i][1])
# Return the minimum random variable
return PDF(RV(min_func,min_supp,['discrete','pdf']))
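# Example (minimal sketch): the minimum of two independent exponential
# lifetimes is exponential with the rates summed. Using only the RV class
# and procedures defined in this module:
#   X = RV([2*exp(-2*x)], [0, oo])      # exponential, rate 2
#   Y = RV([3*exp(-3*x)], [0, oo])      # exponential, rate 3
#   Z = MinimumRV(X, Y)                 # pdf should simplify to 5*exp(-5*x)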
def Mixture(MixParameters,MixRVs):
"""
Procedure Name: Mixture
Purpose: Mixes random variables X1,X2,...,Xn
Arguments: 1. MixParameters: A mix of probability weights
2. MixRVs: RV's X1,X2,...,Xn
Output: 1. The mixture RV
"""
# Check to make sure that the arguments are lists
if type(MixParameters)!=list or type(MixRVs)!=list:
raise RVError('Both arguments must be in list format')
# Check to make sure the lists are of equal length
if len(MixParameters)!=len(MixRVs):
raise RVError('Mix parameter and RV lists must be the same length')
# Check to make sure that the mix parameters are numeric
# and sum to 1
'''
total=0
for i in range(len(MixParameters)):
if type(MixParameters[i])==Symbol:
raise RVError('ApplPy does not support symbolic mixtures')
total+=MixParameters[i]
if total<.9999 or total>1.0001:
raise RVError('Mix parameters must sum to one')
'''
# Check to ensure that the mix rv's are all of the same type
# (discrete or continuous)
for i in range(len(MixRVs)):
if MixRVs[0].ftype[0]!=MixRVs[i].ftype[0]:
raise RVError('Mix RVs must be all continuous or discrete')
# Convert the Mix RVs to their PDF form
Mixfx=[]
for i in range(len(MixRVs)):
Mixfx.append(PDF(MixRVs[i]))
# If the distributions are continuous, find and return the
# mixture pdf
if Mixfx[0].ftype[0]=='continuous':
#X1_dummy.drop_assumptions()
#X2_dummy.drop_assumptions()
# Compute the support of the mixture as the union of the supports
# of the mix rvs
MixSupp=[]
for i in range(len(Mixfx)):
for j in range(len(Mixfx[i].support)):
if Mixfx[i].support[j] not in MixSupp:
MixSupp.append(Mixfx[i].support[j])
MixSupp.sort()
# Compute and return the mixed PDF
fxnew=[]
for i in range(len(MixSupp)-1):
newMixfx=0
for j in range(len(MixParameters)):
m=len(Mixfx[j].support)-1
for k in range(m):
if Mixfx[j].support[k]<=MixSupp[i]:
if MixSupp[i+1]<=Mixfx[j].support[k+1]:
buildfx=Mixfx[j].func[k]*MixParameters[j]
newMixfx+=buildfx
simplify(newMixfx)
fxnew.append(newMixfx)
# Return the mixture rv
return RV(fxnew,MixSupp,['continuous','pdf'])
    # If the mix random variables are discrete in functional form,
    # convert each one to standard discrete form and recompute its PDF
    if MixRVs[0].ftype[0]=='Discrete':
        for i in range(len(MixRVs)):
            for num in MixRVs[i].support:
                if type(num) not in [int,float]:
                    err_string='Mixture does not currently work with'
                    err_string+=' RVs that have symbolic or infinite support'
                    raise RVError(err_string)
            MixRVs[i]=Convert(MixRVs[i])
            Mixfx[i]=PDF(MixRVs[i])
# If the distributions are discrete, find and return the
# mixture pdf
if Mixfx[0].ftype[0]=='discrete':
# Compute the mixture rv by summing over the weights
MixSupp=[]
fxnew=[]
for i in range(len(Mixfx)):
for j in range(len(Mixfx[i].support)):
if Mixfx[i].support[j] not in MixSupp:
MixSupp.append(Mixfx[i].support[j])
fxnew.append(Mixfx[i].func[j]*MixParameters[i])
else:
indx=MixSupp.index(Mixfx[i].support[j])
val=Mixfx[i].func[j]*MixParameters[i]
fxnew[indx]+=val
# Sort the values
zip_list=zip(MixSupp,fxnew)
zip_list.sort()
fxnew=[]
MixSupp=[]
for i in range(len(zip_list)):
fxnew.append(zip_list[i][1])
MixSupp.append(zip_list[i][0])
return RV(fxnew,MixSupp,['discrete','pdf'])
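# Example (minimal sketch): an equally weighted mixture of two exponential
# densities; the weights and rates below are illustrative values.
#   X = RV([exp(-x)], [0, oo])
#   Y = RV([2*exp(-2*x)], [0, oo])
#   M = Mixture([0.5, 0.5], [X, Y])     # pdf should be 0.5*exp(-x) + exp(-2*x)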
def Product(RVar1,RVar2):
"""
Procedure Name: Product
Purpose: Compute the product of two independent
random variables
Arguments: 1. RVar1: A random variable
2. RVar2: A random variable
Output: 1. The product of RVar1 and RVar2
"""
# If the random variable is continuous, find and return the
# product of the two random variables
if RVar1.ftype[0]=='continuous':
#X1_dummy.drop_assumptions()
#X2_dummy.drop_assumptions()
v=Symbol('v',positive=True)
# Place zero in the support of X if it is not there already
X1=PDF(RVar1)
xfunc=[]
xsupp=[]
for i in range(len(X1.func)):
xfunc.append(X1.func[i])
xsupp.append(X1.support[i])
if X1.support[i]<0:
if X1.support[i+1]>0:
xfunc.append(X1.func[i])
xsupp.append(0)
xsupp.append(X1.support[len(X1.support)-1])
X_dummy=RV(xfunc,xsupp,['continuous','pdf'])
# Place zero in the support of Y if it is not already there
Y1=PDF(RVar2)
yfunc=[]
ysupp=[]
for i in range(len(Y1.func)):
yfunc.append(Y1.func[i])
ysupp.append(Y1.support[i])
if Y1.support[i]<0:
if Y1.support[i+1]>0:
yfunc.append(Y1.func[i])
ysupp.append(0)
ysupp.append(Y1.support[len(Y1.support)-1])
Y_dummy=RV(yfunc,ysupp,['continuous','pdf'])
# Initialize the support list for the product V=X*Y
vsupp=[]
for i in range(len(X_dummy.support)):
for j in range(len(Y_dummy.support)):
val=X_dummy.support[i]*Y_dummy.support[j]
if val==nan:
val=0
if val not in vsupp:
vsupp.append(val)
vsupp.sort()
# Initialize the pdf segments of v
vfunc=[]
for i in range(len(vsupp)-1):
vfunc.append(0)
# Loop through each piecewise segment of X
for i in range(len(X_dummy.func)):
# Loop through each piecewise segment of Y
for j in range(len(Y_dummy.func)):
# Define the corner of the rectangular region
a=X_dummy.support[i]
b=X_dummy.support[i+1]
c=Y_dummy.support[j]
d=Y_dummy.support[j+1]
# If the region is in the first quadrant, compute the
# required integrals sequentially
if a>=0 and c>=0:
v=Symbol('v',positive=True)
if type(Y_dummy.func[j]) not in [float,int]:
gj=Y_dummy.func[j].subs(x,v/x)
else:
gj=Y_dummy.func[j]
fi=X_dummy.func[i]
pv=integrate(fi*gj*(1/x),(x,a,b))
if d<oo:
qv=integrate(fi*gj*(1/x),(x,v/d,b))
if c>0:
rv=integrate(fi*gj*(1/x),(x,a,v/c))
if c>0 and d<oo and a*d<b*c:
sv=integrate(fi*gj*(1/x),(x,v/d,v/c))
# 1st Qd, Scenario 1
if c==0 and d==oo:
for k in range(len(vfunc)):
if vsupp[k]>=0:
vfunc[k]+=pv
# 1st Qd, Scenario 2
if c==0 and d<oo:
for k in range(len(vfunc)):
if vsupp[k]>=0 and vsupp[k+1]<=a*d:
vfunc[k]+=pv
if vsupp[k]>=a*d and vsupp[k+1]<=b*d:
vfunc[k]+=qv
# 1st Qd, Scenario 3
if c>0 and d==oo:
for k in range(len(vfunc)):
if vsupp[k]>=b*c:
vfunc[k]+=pv
if vsupp[k]>=a*c and vsupp[k+1]<=b*c:
vfunc[k]+=rv
# 1st Qd, Scenario 4
if c>0 and d<oo:
# Case 1
if a*d<b*c:
for k in range(len(vfunc)):
if vsupp[k]>=a*c and vsupp[k+1]<=a*d:
vfunc[k]+=rv
if vsupp[k]>=a*d and vsupp[k+1]<=b*c:
vfunc[k]+=sv
if vsupp[k]>=b*c and vsupp[k+1]<=b*d:
vfunc[k]+=qv
# Case 2
if a*d==b*c:
for k in range(len(vfunc)):
if vsupp[k]>=a*c and vsupp[k+1]<=a*d:
vfunc[k]+=rv
if vsupp[k]>=b*c and vsupp[k+1]<=b*d:
vfunc[k]+=qv
# Case 3
if a*d>b*c:
for k in range(len(vfunc)):
if vsupp[k]>=a*c and vsupp[k+1]<=b*c:
vfunc[k]+=rv
if vsupp[k]>=b*c and vsupp[k+1]<=a*d:
vfunc[k]+=pv
if vsupp[k]>=a*d and vsupp[k+1]<=b*d:
vfunc[k]+=qv
# If the region is in the second quadrant, compute
# the required integrals sequentially
if a<0 and c<0:
v=Symbol('v',positive=True)
if type(Y_dummy.func[j]) not in [float,int]:
gj=Y_dummy.func[j].subs(x,v/x)
else:
gj=Y_dummy.func[j]
fi=X_dummy.func[i]
pv=-integrate(fi*gj*(1/x),(x,a,b))
if d<0:
qv=-integrate(fi*gj*(1/x),(x,(v/d),b))
if c>-oo:
rv=-integrate(fi*gj*(1/x),(x,a,(v/c)))
if c>-oo and d<0:
sv=-integrate(fi*gj*(1/x),(x,(v/d),(v/c)))
# 2nd Qd, Scenario 1
if c==-oo and d==0:
for k in range(len(vfunc)):
if vsupp[k]>=0:
vfunc[k]+=pv
# 2nd Qd, Scenario 2
if c==-oo and d<0:
for k in range(len(vfunc)):
if vsupp[k]>=a*d and vsupp[k+1]<=oo:
vfunc[k]+=pv
if vsupp[k]>=b*d and vsupp[k+1]<=a*d:
vfunc[k]+=qv
# 2nd Qd, Scenario 3
if c>-oo and d==0:
for k in range(len(vfunc)):
if vsupp[k]>=0 and vsupp[k+1]<=b*c:
vfunc[k]+=pv
if vsupp[k]>=b*c and vsupp[k+1]<=a*c:
vfunc[k]+=rv
# 2nd Qd, Scenario 4
if c>-oo and d<0:
# Case 1
if a*d>b*c:
for k in range(len(vfunc)):
if vsupp[k]>=a*d and vsupp[k+1]<=a*c:
vfunc[k]+=rv
if vsupp[k]>=b*c and vsupp[k+1]<=a*d:
vfunc[k]+=sv
if vsupp[k]>=b*d and vsupp[k+1]<=b*c:
vfunc[k]+=qv
# Case 2
if a*d==b*c:
for k in range(len(vfunc)):
if vsupp[k]>=a*d and vsupp[k+1]<=a*c:
vfunc[k]+=rv
if vsupp[k]>=b*d and vsupp[k+1]<=b*c:
vfunc[k]+=qv
# Case 3
if a*d<b*c:
for k in range(len(vfunc)):
if vsupp[k]>=b*c and vsupp[k+1]<=a*c:
vfunc[k]+=rv
if vsupp[k]>=a*d and vsupp[k+1]<=b*c:
vfunc[k]+=pv
if vsupp[k]>=b*d and vsupp[k+1]<=a*d:
vfunc[k]+=qv
# If the region is in the third quadrant, compute
# the required integrals sequentially
if a<0 and c>=0:
v=Symbol('v',negative=True)
if type(Y_dummy.func[j]) not in [float,int]:
gj=Y_dummy.func[j].subs(x,v/x)
else:
gj=Y_dummy.func[j]
fi=X_dummy.func[i]
pv=-integrate(fi*gj*(1/x),(x,a,b))
if d<oo:
qv=-integrate(fi*gj*(1/x),(x,a,(v/d)))
if c>0:
rv=-integrate(fi*gj*(1/x),(x,(v/b),c))
if c>0 and d<oo:
sv=-integrate(fi*gj*(1/x),(x,(v/c),(v/d)))
# 3rd Qd, Scenario 1
if c==0 and d==oo:
for k in range(len(vfunc)):
if vsupp[k+1]<=0:
vfunc[k]+=pv
# 3rd Qd, Scenario 2
if c==0 and d<oo:
for k in range(len(vfunc)):
if vsupp[k]>=b*d and vsupp[k+1]<=0:
vfunc[k]+=pv
if vsupp[k]>=a*d and vsupp[k+1]<=b*d:
vfunc[k]+=qv
# 3rd Qd, Scenario 3
if c>0 and d==oo:
for k in range(len(vfunc)):
if vsupp[k]>=-oo and vsupp[k+1]<=a*c:
vfunc[k]+=pv
if vsupp[k]>=a*c and vsupp[k+1]<=b*c:
vfunc[k]+=rv
# 3rd Qd, Scenario 4
if c>0 and d<oo:
# Case 1
if b*d>a*c:
for k in range(len(vfunc)):
if vsupp[k]>=b*d and vsupp[k+1]<=b*c:
vfunc[k]+=rv
if vsupp[k]>=a*c and vsupp[k+1]<=b*d:
vfunc[k]+=sv
if vsupp[k]>=a*d and vsupp[k+1]<=a*c:
vfunc[k]+=qv
# Case 2
if a*c==b*d:
for k in range(len(vfunc)):
if vsupp[k]>=a*d and vsupp[k+1]<=a*c:
vfunc[k]+=qv
if vsupp[k]>=b*d and vsupp[k+1]<=b*c:
vfunc[k]+=rv
# Case 3
if a*c>b*d:
for k in range(len(vfunc)):
if vsupp[k]>=a*c and vsupp[k+1]<=b*c:
vfunc[k]+=rv
if vsupp[k]>=b*d and vsupp[k+1]<=a*c:
vfunc[k]+=pv
if vsupp[k]>=a*d and vsupp[k+1]<=b*d:
vfunc[k]+=qv
# If the region is in the fourth quadrant, compute
# the required integrals sequentially
if a>=0 and c<0:
v=Symbol('v',negative=True)
if type(Y_dummy.func[j]) not in [float,int]:
gj=Y_dummy.func[j].subs(x,v/x)
else:
gj=Y_dummy.func[j]
fi=X_dummy.func[i]
pv=integrate(fi*gj*(1/x),(x,a,b))
if d<0:
qv=integrate(fi*gj*(1/x),(x,a,(v/d)))
if c>-oo:
rv=integrate(fi*gj*(1/x),(x,(v/c),b))
if c>-oo and d<0:
sv=integrate(fi*gj*(1/x),(x,(v/c),(v/d)))
# 4th Qd, Scenario 1
                    if c==-oo and d==0:
for k in range(len(vfunc)):
if vsupp[k+1]<=0:
vfunc[k]+=pv
# 4th Qd, Scenario 2
                    if c==-oo and d<0:
for k in range(len(vfunc)):
if vsupp[k]>=-oo and vsupp[k+1]<=b*d:
vfunc[k]+=pv
if vsupp[k]>=b*d and vsupp[k+1]<=a*d:
vfunc[k]+=qv
# 4th Qd, Scenario 3
if c>-oo and d==0:
for k in range(len(vfunc)):
if vsupp[k]>=a*c and vsupp[k+1]<=0:
vfunc[k]+=pv
if vsupp[k]>=b*c and vsupp[k+1]<=a*c:
vfunc[k]+=rv
# 4th Qd, Scenario 4
if c>-oo and d<0:
# Case 1
if a*c>b*d:
for k in range(len(vfunc)):
if vsupp[k]>=b*c and vsupp[k+1]<=b*d:
vfunc[k]+=rv
if vsupp[k]>=b*d and vsupp[k+1]<=a*c:
vfunc[k]+=sv
if vsupp[k]>=a*c and vsupp[k+1]<=a*d:
vfunc[k]+=qv
# Case 2
                        if a*c==b*d:
for k in range(len(vfunc)):
if vsupp[k]>=b*c and vsupp[k+1]<=a*c:
vfunc[k]+=rv
if vsupp[k]>=a*c and vsupp[k+1]<=a*d:
vfunc[k]+=qv
# Case 3
if a*c<b*d:
for k in range(len(vfunc)):
if vsupp[k]>=b*c and vsupp[k+1]<=a*c:
vfunc[k]+=rv
if vsupp[k]>=a*c and vsupp[k+1]<=b*d:
vfunc[k]+=pv
if vsupp[k]>=b*d and vsupp[k+1]<=a*d:
vfunc[k]+=qv
vfunc_final=[]
for i in range(len(vfunc)):
if type(vfunc[i]) not in [int,float]:
vfunc_final.append(simplify(vfunc[i]).subs(v,x))
else:
vfunc_final.append(vfunc[i])
return RV(vfunc_final,vsupp,['continuous','pdf'])
    # If the two random variables are discrete in functional form,
# find and return the product of the two random variables
if RVar1.ftype[0]=='Discrete':
for num in RVar1.support:
if type(num) not in [int,float]:
                err_string='Product does not currently work with'
                err_string+=' RVs that have symbolic or infinite support'
raise RVError(err_string)
RVar1=Convert(RVar1)
if RVar2.ftype[0]=='Discrete':
        for num in RVar2.support:
if type(num) not in [int,float]:
                err_string='Product does not currently work with'
                err_string+=' RVs that have symbolic or infinite support'
raise RVError(err_string)
RVar2=Convert(RVar2)
# If the distributions are discrete, find and return the product
# of the two random variables.
if RVar1.ftype[0]=='discrete':
# Convert each random variable to its pdf form
X1_dummy=PDF(RVar1)
X2_dummy=PDF(RVar2)
# Create function and support lists for the product of the
# two random variables
prodlist=[]
funclist=[]
for i in range(len(X1_dummy.support)):
for j in range(len(X2_dummy.support)):
prodlist.append(X1_dummy.support[i]*X2_dummy.support[j])
funclist.append(X1_dummy.func[i]*X2_dummy.func[j])
# Sort the function and support lists for the convolution
sortlist=zip(prodlist,funclist)
sortlist.sort()
prodlist2=[]
funclist2=[]
for i in range(len(sortlist)):
prodlist2.append(sortlist[i][0])
funclist2.append(sortlist[i][1])
# Remove redundant elements in the support list
prodlist3=[]
funclist3=[]
for i in range(len(prodlist2)):
if prodlist2[i] not in prodlist3:
prodlist3.append(prodlist2[i])
funclist3.append(funclist2[i])
else:
funclist3[prodlist3.index(prodlist2[i])]+=funclist2[i]
# Create and return the new random variable
return RV(funclist3,prodlist3,['discrete','pdf'])
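# Example (minimal sketch): the product of two independent U(0, 1) random
# variables, which should yield the pdf -ln(v) on (0, 1).
#   X = RV([1], [0, 1])
#   Y = RV([1], [0, 1])
#   V = Product(X, Y)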
def ProductDiscrete(RVar1,RVar2):
"""
Procedure Name: ProductDiscrete
Purpose: Compute the product of two independent
discrete random variables
Arguments: 1. RVar1: A random variable
2. RVar2: A random variable
Output: 1. The product of RVar1 and RVar2
"""
# Ensure that both random variables are discrete
if RVar1.ftype[0]!='discrete' or RVar2.ftype[0]!='discrete':
raise RVError('both random variables must be discrete')
# Convert both random variables to pdf form
X_dummy1=PDF(RVar1)
X_dummy2=PDF(RVar2)
# Convert the support and the value of each random variable
# into a numpy matrix
support1=np.matrix(X_dummy1.support)
support2=np.matrix(X_dummy2.support)
pdf1=np.matrix(X_dummy1.func)
pdf2=np.matrix(X_dummy2.func)
# Find all possible values of support1*support2 and val1*val2
# by computing (X1)'*X2, flatten into a row vector
prodsupport=support1.T*support2
prodsupport=prodsupport.flatten()
prodpdf=pdf1.T*pdf2
prodpdf=prodpdf.flatten()
#
# Stack the support vector and the value vector into a matrix
#prodmatrix=np.vstack([prodsupport,prodpdf]).T
#
#
# Convert the resulting vectors into lists
supportlist=prodsupport.tolist()[0]
pdflist=prodpdf.tolist()[0]
# Sort the function and support lists for the product
sortlist=zip(supportlist,pdflist)
sortlist.sort()
prodlist2=[]
funclist2=[]
for i in range(len(sortlist)):
prodlist2.append(sortlist[i][0])
funclist2.append(sortlist[i][1])
# Remove redundant elements in the support list
prodlist3=[]
funclist3=[]
for i in range(len(prodlist2)):
if prodlist2[i] not in prodlist3:
prodlist3.append(prodlist2[i])
funclist3.append(funclist2[i])
else:
funclist3[prodlist3.index(prodlist2[i])]+=funclist2[i]
# Create and return the new random variable
return RV(funclist3,prodlist3,['discrete','pdf'])
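# Example (minimal sketch): the product of two independent discrete random
# variables, each placing probability 1/2 on the values 1 and 2.
#   X = RV([0.5, 0.5], [1, 2], ['discrete', 'pdf'])
#   Y = RV([0.5, 0.5], [1, 2], ['discrete', 'pdf'])
#   V = ProductDiscrete(X, Y)   # support [1, 2, 4], probabilities [0.25, 0.5, 0.25]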
"""
Utilities
Procedures:
1. Histogram(Sample,bins)
2. LoadRV(filename)
3. PlotClear()
4. PlotDist(RVar,suplist)
    5. PlotDisplay(plot_list)
6. PlotEmpCDF(data)
7. PlotLimits(limits, axis)
8. PPPlot(RVar,Sample)
9. QQPlot(RVar,Sample)
"""
def Histogram(Sample,Bins=None):
"""
Procedure: Histogram
Purpose: Construct a histogram from a sample of data
Arguments: 1. Sample: The data sample from which to construct
the histogram
2. bins: The number of bins in the histogram
Output: 1. A histogram plot
"""
# Check to ensure that the sample is given as a list
if type(Sample)!=list:
raise RVError('The data sample must be entered as a list')
Sample.sort()
if Bins==None:
Bins=1
for i in range(1,len(Sample)):
if Sample[i]!=Sample[i-1]:
Bins+=1
plt.ion()
plt.hist(Sample,bins=Bins,normed=True)
plt.ylabel('Relative Frequency')
plt.xlabel('Observation Value')
plt.title('Histogram')
plt.grid(True)
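# Example (minimal sketch): a histogram of a small illustrative sample,
# assuming matplotlib is available as plt in this module.
#   data = [0.4, 0.7, 0.9, 1.1, 1.2, 1.9, 2.3, 2.8]
#   Histogram(data, Bins=5)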
def LoadRV(filename):
"""
Procedure: LoadRV
Purpose: Load a random variable from a binary file
    Arguments: 1. filename: the name of the file
where the random variable is stored
Output: 1. The stored random variable
"""
    fileObject = open(filename, 'rb')
RVar = pickle.load(fileObject)
if 'RV' not in RVar.__class__.__name__:
print 'WARNING: Object loaded is not a random variable'
return RVar
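# Example (minimal sketch): load a previously pickled random variable; the
# file name below is hypothetical.
#   X = LoadRV('saved_rv.pkl')
#   PlotDist(PDF(X))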
def PlotClear():
"""
Procedure: PlotClear
Purpose: Clears the plot display
Arguments: None
Output: 1. Clear plot display
"""
plt.clf()
def PlotLimits(limits, axis):
"""
Procedure: PlotLimits
Purpose: Sets the limits of a plot
Arguments: 1. limits: A list of plot limits
Output: 1. Plot with limits reset
"""
axes = plt.gca()
if axis == 'x':
axes.set_xlim(limits)
elif axis == 'y':
axes.set_ylim(limits)
else:
err_str = 'The axis parameter in PlotLimits must be "x" or "y"'
raise RVError(err_str)
def PlotDist(RVar,suplist=None,opt=None,color='r',
display=True):
"""
Procedure: PlotDist
Purpose: Plot a random variable
Arguments: 1. RVar: A random variable
2. suplist: A list of supports for the plot
Output: 1. A plot of the random variable
"""
# Create the labels for the plot
if RVar.ftype[1]=='cdf':
#lab1='F(x)'
lab2='Cumulative Distribution Function'
elif RVar.ftype[1]=='chf':
#lab1='H(x)'
lab2='Cumulative Hazard Function'
elif RVar.ftype[1]=='hf':
#lab1='h(x)'
lab2='Hazard Function'
elif RVar.ftype[1]=='idf':
#lab1='F-1(s)'
        lab2='Inverse Distribution Function'
elif RVar.ftype[1]=='pdf':
#lab1='f(x)'
lab2='Probability Density Function'
elif RVar.ftype[1]=='sf':
#lab1='S(X)'
lab2='Survivor Function'
if opt=='EMPCDF':
lab2='Empirical CDF'
# If the distribution is continuous, plot the function
if RVar.ftype[0]=='continuous':
# Return an error if the plot supports are not
# within the support of the random variable
if suplist!=None:
if suplist[0]>suplist[1]:
raise RVError('Support list must be in ascending order')
if suplist[0]<RVar.support[0]:
raise RVError('Plot supports must fall within RV support')
if suplist[1]>RVar.support[1]:
raise RVError('Plot support must fall within RV support')
# Cut out parts of the distribution that don't fall
# within the limits of the plot
if suplist==None:
# Since plotting is numeric, the lower support cannot be -oo
if RVar.support[0]==-oo:
support1=float(RVar.variate(s=.01)[0])
else:
support1=float(RVar.support[0])
# Since plotting is numeric, the upper support cannot be oo
if RVar.support[len(RVar.support)-1]==oo:
support2=float(RVar.variate(s=.99)[0])
else:
support2=float(RVar.support[len(RVar.support)-1])
suplist=[support1,support2]
for i in range(len(RVar.func)):
if suplist[0]>=float(RVar.support[i]):
if suplist[0]<=float(RVar.support[i+1]):
lwindx=i
if suplist[1]>=float(RVar.support[i]):
if suplist[1]<=float(RVar.support[i+1]):
upindx=i
# Create a list of functions for the plot
plotfunc=[]
for i in range(len(RVar.func)):
if i>=lwindx and i<=upindx:
plotfunc.append(RVar.func[i])
# Create a list of supports for the plot
plotsupp=[suplist[0]]
upindx+=1
for i in range(len(RVar.support)):
if i>lwindx and i<upindx:
plotsupp.append(RVar.support[i])
plotsupp.append(suplist[1])
#print plotfunc, plotsupp
for i, function in enumerate(plotfunc):
f = lambda y: function.subs(x,y).evalf()
x_range = np.arange(plotsupp[i],
plotsupp[i+1],
abs(plotsupp[i+1]-plotsupp[i])/1000)
y_range = np.array( [f(num) for num in x_range])
plt.plot(x_range, y_range, color)
plt.title(lab2)
'''
Old plot method using the sympy plot
plt.ioff()
print plotfunc, plotsupp
initial_plot=plot(plotfunc[0],(x,plotsupp[0],plotsupp[1]),
title=lab2,show=False,line_color=color)
for i in range(1,len(plotfunc)):
plot_extension=plot(plotfunc[i],
(x,plotsupp[i],plotsupp[i+1]),
show=False,line_color=color)
initial_plot.append(plot_extension[0])
if display==True:
plt.ion()
initial_plot.show()
return initial_plot
else:
return initial_plot
'''
# Old PlotDist code before sympy created the
# plotting front-end
#print plotsupp
# Parse the functions for matplotlib
#plot_func=[]
#for i in range(len(plotfunc)):
# strfunc=str(plotfunc[i])
# plot_func.append(strfunc)
#print plot_func
# Display the plot
#if opt!='display':
# plt.ion()
#plt.mat_plot(plot_func,plotsupp,lab1,lab2,'continuous')
if RVar.ftype[0]=='discrete' or RVar.ftype[0]=='Discrete':
if RVar.ftype[0]=='Discrete':
if RVar.support[-1]!=oo:
RVar=Convert(RVar)
else:
p=1;i=RVar.support[0]
while p>.00001:
p=PDF(RVar,i).evalf()
i+=1
newsupport=RVar.support
newsupport[-1]=i
RVar=RV(RVar.func,newsupport,RVar.ftype)
RVar=Convert(RVar)
#if display==True:
# plt.ion()
#plt.mat_plot(RVar.func,RVar.support,lab1,lab2,'discrete')
plt.step(RVar.support,RVar.func)
#plt.xlabel('x')
#if lab1!=None:
# plt.ylabel(lab1)
if lab2!=None:
plt.title(lab2)
def PlotDisplay(plot_list):
    """
    Procedure Name: PlotDisplay
    Purpose: Displays a list of plots on a single set of axes
    Arguments: 1. plot_list: a list of plots
    Output: 1. A combined display of the listed plots
    """
    if len(plot_list)<2:
raise RVError('PlotDisplay requires a list with multiple plots')
plt.ion()
totalplot=plot_list[0]
for graph in plot_list[1:]:
totalplot.append(graph[0])
totalplot.show()
def PlotEmpCDF(data):
"""
Procedure Name: PlotEmpCDF
Purpose: Plots an empirical CDF, given a data set
Arguments: 1. data: A data sample
Output: 1. An empirical cdf of the data
"""
# Create a bootstrap random variable from the data
Xstar=BootstrapRV(data)
PlotDist(CDF(Xstar),opt='EMPCDF')
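# Example (minimal sketch): an empirical CDF of a small illustrative sample.
#   data = [0.3, 0.8, 1.1, 1.7, 2.4]
#   PlotEmpCDF(data)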
def PPPlot(RVar,Sample):
"""
Procedure Name: PPPlot
Purpose: Plots the model probability versus the sample
probability
Arguments: 1. RVar: A random variable
2. Sample: An experimental sample
Output: 1. A PPPlot comparing the sample to a theoretical
model
"""
# Return an error message if the sample is not given as
# a list
if type(Sample)!=list:
raise RVError('The data sample must be given as a list')
# Create a list of quantiles
n=len(Sample)
Sample.sort()
plist=[]
for i in range(1,n+1):
p=(i-(1/2))/n
plist.append(p)
# Create a list of CDF values for the sample and the
# theoretical model
FX=CDF(RVar)
fxstar=BootstrapRV(Sample)
FXstar=CDF(fxstar)
FittedCDF=[]
ObservedCDF=[]
for i in range(len(plist)):
FittedCDF.append(CDF(FX,Sample[i]))
ObservedCDF.append(CDF(FXstar,Sample[i]))
# Plot the results
plt.ion()
plt.prob_plot(ObservedCDF,FittedCDF,'PP Plot')
def QQPlot(RVar,Sample):
"""
Procedure: QQPlot
Purpose: Plots the q_i quantile of a fitted distribution
versus the q_i quantile of the sample dist
Arguments: 1. RVar: A random variable
2. Sample: Sample data
Output: 1. QQ Plot
"""
    # Return an error message if the sample is not given as
# a list
if type(Sample)!=list:
raise RVError('The data sample must be given as a list')
# Create a list of quantiles
n=len(Sample)
Sample.sort()
qlist=[]
for i in range(1,n+1):
q=(i-(1/2))/n
qlist.append(q)
# Create 'fitted' list
Fitted=[]
for i in range(len(qlist)):
Fitted.append(RVar.variate(s=qlist[i])[0])
# Plot the results
plt.ion()
plt.prob_plot(Sample,Fitted,'QQ Plot') | APPLPy | /APPLPy-0.4.12.tar.gz/APPLPy-0.4.12/applpy/rv.py | rv.py |
from __future__ import division
from sympy import (Symbol, symbols, oo, integrate, summation, diff,
exp, pi, sqrt, factorial, ln, floor, simplify,
solve, nan, Add, Mul, Integer, function,
binomial,gamma,cos,cot,Rational,atan,log)
from random import random
from .rv import (RV, RVError, CDF, CHF, HF, IDF, PDF, SF,
BootstrapRV, Convert)
from .bivariate import (BivariateRV)
x,y,z,t,v=symbols('x y z t v')
"""
A Probability Programming Language (APPL) -- Python Edition
Copyright (C) 2001,2002,2008,2010,2014 Andrew Glen, Larry
Leemis, Diane Evans, Matthew Robinson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
def param_check(param):
    flag=True
    for element in param:
        if element.__class__.__name__=='Symbol':
            flag=False
    return flag
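# Example (minimal sketch): param_check returns True when every parameter is
# numeric and False when any parameter is still symbolic.
#   param_check([2, 0.5])             # True
#   param_check([Symbol('theta')])    # False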
"""
Continuous Distributions
"""
class ArcSinRV(RV):
"""
Procedure Name: ArcSinRV
Purpose: Creates an instance of the arc sin distribution
Arguments: 1. None
Output: 1. An arc sin random variable
"""
def __init__(self):
# x = Symbol('x', postive=True)
X_dummy=RV(1/(pi*sqrt(x*(1-x))),[0,1])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class ArcTanRV(RV):
"""
Procedure Name: ArcTanRV
Purpose: Creates an instance of the arc tan distribution
    Arguments: 1. alpha: a strictly positive parameter
               2. phi: a real valued parameter
    Output: 1. An arc tan random variable
"""
def __init__(self,alpha=Symbol('alpha',positive=True),
phi=Symbol('phi')):
# Return an error if invalid parameters are entered
        if alpha in [-oo,oo] or phi in [-oo,oo]:
            err_string='Both parameters must be finite'
            raise RVError(err_string)
if alpha <= 0:
err_string='Alpha must be positive'
raise RVError(err_string)
X_dummy=RV(alpha/((atan(alpha*phi)+pi/2)*(1+alpha**2*(x-phi)**2)),
[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class BetaRV(RV):
"""
Procedure Name: BetaRV
Purpose: Creates an instance of the beta distribution
Arguments: 1. alpha: a strictly positive parameter
2. beta: a strictly positive parameter
Output: 1. A beta random variable
"""
    def __init__(self,alpha=Symbol('alpha',positive=True),
                 beta=Symbol('beta',positive=True)):
        #x = Symbol('x', positive = True)
        if alpha in [-oo,oo] or beta in [-oo,oo]:
            err_string='Both parameters must be finite'
            raise RVError(err_string)
        if alpha.__class__.__name__!='Symbol':
            if beta.__class__.__name__!='Symbol':
                if alpha<=0 or beta<=0:
                    err_string='Both parameters must be positive'
                    raise RVError(err_string)
X_dummy=RV((gamma(alpha+beta)*(x**(alpha-1))*(1-x)**(beta-1))/
(gamma(alpha)*gamma(beta)),[0,1])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class CauchyRV(RV):
"""
Procedure Name: CauchyRV
Purpose: Creates an instance of the Cauchy distribution
Arguments: 1. a: a real valued parameter
               2. alpha: a strictly positive parameter
Output: 1. A Cauchy random variable
"""
    def __init__(self,a=Symbol('a'),
                 alpha=Symbol('alpha',positive=True)):
        if a in [-oo,oo] or alpha in [-oo,oo]:
            err_string='Both parameters must be finite'
            raise RVError(err_string)
        if alpha.__class__.__name__!='Symbol':
            if alpha<=0:
                err_string='alpha must be positive'
                raise RVError(err_string)
X_dummy=RV((1)/(alpha*pi*(1+((x-a)**2/alpha**2))),[-oo,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[a,alpha]
self.cache={}
def variate(self,n=1,s=None,method='special'):
# If no parameter is specified, return an error
if param_check(self.parameter)==False:
raise RVError('Not all parameters specified')
# Check to see if the user specified a valid method
method_list=['special','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
# Generate cauchy variates
idf_func=self.parameter[0]-cot(pi*t)*self.parameter[1]
varlist=[]
for i in range(n):
if s==None:
val=random()
else:
val=s
var=idf_func.subs(t,val).evalf()
varlist.append(var)
varlist.sort()
return varlist
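# Example (minimal sketch): a Cauchy(0, 1) random variable; the variates
# below use the closed-form inverse a - alpha*cot(pi*t) coded above.
#   X = CauchyRV(0, 1)
#   X.variate(n=5)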
class ChiRV(RV):
"""
Procedure Name: ChiRV
Purpose: Creates an instance of the chi distribution
Arguments: 1. N: a positive integer parameter
Output: 1. A chi random variable
"""
def __init__(self,N=Symbol('N',positive=True,
integer=True)):
#x = Symbol('x', positive = True)
if N.__class__.__name__!='Symbol':
if N<=0 or type(N)!=int:
err_string='N must be a positive integer'
raise RVError(err_string)
X_dummy=RV(((x**(N-1))*exp(-x**2/2))/
(2**((Rational(N,2))-1)*gamma(Rational(N,2))),[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class ChiSquareRV(RV):
"""
Procedure Name: ChiSquareRV
Purpose: Creates an instance of the chi square distribution
Arguments: 1. N: a positive integer parameter
Output: 1. A chi squared random variable
"""
def __init__(self,N=Symbol('N',positive=True,
integer=True)):
#x = Symbol('x', positive = True)
if N.__class__.__name__!='Symbol':
if N<=0 or type(N)!=int:
err_string='N must be a positive integer'
raise RVError(err_string)
X_dummy=RV((x**((N/2)-1)*exp(-x/2))/
(2**(N/2)*gamma(N/2)),[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class ErlangRV(RV):
"""
Procedure Name: ErlangRV
Purpose: Creates an instance of the Erlang distribution
Arguments: 1. theta: a strictly positive parameter
2. N: a positive integer parameter
Output: 1. An erlang random variable
"""
def __init__(self,theta=Symbol('theta',positive=True),
N=Symbol('N',positive=True,integer=True)):
#x = Symbol('x', positive = True)
if N.__class__.__name__!='Symbol':
if N<=0 or type(N)!=int:
err_string='N must be a positive integer'
raise RVError(err_string)
if theta.__class__.__name__!='Symbol':
if theta<=0:
err_string='theta must be positive'
raise RVError(err_string)
if theta in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV((theta*(theta*x)**(N-1)*exp(-theta*x))/
(factorial(N-1)),[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class ErrorRV(RV):
"""
Procedure Name: ErrorRV
Purpose: Creates an instance of the error distribution
Arguments: 1. mu: a strictly positive parameter
2. alpha: a real valued parameter
3. d: a real valued parameter
Output: 1. An error random variable
"""
def __init__(self,mu=Symbol('mu',positive=True),
alpha=Symbol('alpha'),d=Symbol('d')):
if mu.__class__.__name__!='Symbol':
if mu<=0:
err_string='mu must be positive'
raise RVError(err_string)
        if mu in [-oo,oo] or alpha in [-oo,oo] or d in [-oo,oo]:
            err_string='all parameters must be finite'
            raise RVError(err_string)
X_dummy=RV(mu*exp(-abs(mu*(x-d))**alpha)/
(2*gamma(1+1/alpha)),[-oo,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class ErrorIIRV(RV):
"""
Procedure Name: ErrorIIRV
Purpose: Creates an instance of the error II distribution
Arguments: 1. a: a real valued parameter
2. b: a real valued parameter
3. c: a real valued parameter
Output: 1. An error II random variable
"""
def __init__(self,a=Symbol('a'),b=Symbol('b'),
c=Symbol('c')):
        if a in [-oo,oo] or b in [-oo,oo] or c in [-oo,oo]:
            err_string='all parameters must be finite'
            raise RVError(err_string)
X_dummy=RV(exp(-((abs(x-a))**(2/c)/(2*b))/
((b**(c/2))*2**(c/2+1)*gamma(c/2+1))),
[-oo,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class ExponentialRV(RV):
"""
Procedure Name: ExponentialRV
Purpose: Creates an instance of the exponential distribution
Arguments: 1. theta: a strictly positive parameter
Output: 1. An exponential random variable
"""
def __init__(self,theta=Symbol('theta',positive=True)):
#x = Symbol('x', positive = True)
if theta.__class__.__name__!='Symbol':
if theta<=0:
err_string='theta must be positive'
raise RVError(err_string)
if theta in [-oo,oo]:
err_string='theta must be finite'
raise RVError(err_string)
X_dummy=RV([theta*exp(-theta*x)],[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[theta]
self.cache={}
def variate(self,n=1,s=None,method='special'):
# If no parameter is specified, return an error
if param_check(self.parameter)==False:
raise RVError('Not all parameters specified')
# Check to see if the user specified a valid method
method_list=['special','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
# Generate exponential variates
idf_func=(-ln(1-t))/(self.parameter[0])
varlist=[]
for i in range(n):
if s==None:
val=random()
else:
val=s
var=idf_func.subs(t,val)
varlist.append(var)
varlist.sort()
return varlist
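# Example (minimal sketch): an exponential random variable with rate 1/2; the
# variates use the inverse CDF -ln(1-t)/theta coded above.
#   X = ExponentialRV(Rational(1, 2))
#   X.variate(n=3)
#   PDF(X, 2)      # should evaluate to exp(-1)/2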
class ExponentialPowerRV(RV):
"""
Procedure Name: ExponentialPowerRV
Purpose: Creates an instance of the exponential power distribution
Arguments: 1. theta: a strictly positive parameter
2. kappa: a strictly positive parameter
Output: 1. An exponential power random variable
"""
def __init__(self,theta=Symbol('theta',positive=True),
kappa=Symbol('kappa',positive=True)):
#x = Symbol('x', positive = True)
if theta.__class__.__name__!='Symbol':
if kappa.__class__.__name__!='Symbol':
if theta<=0 or kappa<=0:
err_string='both parameters must be positive'
raise RVError(err_string)
X_dummy=RV(exp(1-exp(theta*x**(kappa)))*exp(theta*x**(kappa))*
theta*kappa*x**(kappa-1),[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[theta,kappa]
self.cache={}
def variate(self,n=1,s=None,method='special'):
# If no parameter is specified, return an error
if param_check(self.parameter)==False:
raise RVError('Not all parameters specified')
# Check to see if the user specified a valid method
method_list=['special','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
# Generate exponential power variates
        idf_func=exp((-ln(self.parameter[0])+ln(ln(1-ln(1-t))))/
self.parameter[1])
varlist=[]
for i in range(n):
if s==None:
val=random()
else:
val=s
var=idf_func.subs(t,val)
varlist.append(var)
varlist.sort()
return varlist
class ExtremeValueRV(RV):
"""
Procedure Name: ExtremeValueRV
Purpose: Creates an instance of the extreme value distribution
Arguments: 1. alpha: a real valued parameter
2. beta: a real valued parameter
Output: 1. An extreme value random variable
"""
def __init__(self,alpha=Symbol('alpha'),beta=Symbol('beta')):
if alpha in [-oo,oo]:
if beta in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV((beta*exp((x*beta)-((exp(x*beta))/alpha)))/
alpha,[-oo,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[alpha,beta]
self.cache={}
def variate(self,n=1,s=None,method='special'):
if param_check(self.parameter)==False:
raise RVError('Not all parameters specified')
# Check to see if the user specified a valid method
method_list=['special','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
idf_func=(ln(self.parameter[0])+ln(ln(-1/(t-1))))/self.parameter[1]
varlist=[]
for i in range(n):
if s==None:
val=random()
else:
val=s
var=idf_func.subs(t,val)
varlist.append(var)
return varlist
class FRV(RV):
"""
Procedure Name: FRV
Purpose: Creates an instance of the f distribution
Arguments: 1. n1: a strictly positive parameter
2. n2: a strictly positive parameter
    Output: 1. An F random variable
"""
def __init__(self,n1=Symbol('n1',positive=True),
n2=Symbol('n2',positive=True)):
#x = Symbol('x', positive = True)
if n1.__class__.__name__!='Symbol':
if n2.__class__.__name__!='Symbol':
if n1<=0 or n2<=0:
err_string='both parameters must be positive'
raise RVError(err_string)
if n1 in [-oo,oo] or n2 in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
        X_dummy=RV(gamma((n1+n2)/2)*(n1/n2)**(n1/2)*x**(n1/2-1)/
                   (gamma(n1/2)*gamma(n2/2)*((n1/n2)*x+1)**((n1+n2)/2)),
[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class GammaRV(RV):
"""
Procedure Name: GammaRV
Purpose: Creates an instance of the gamma distribution
Arguments: 1. theta: a strictly positive parameter
2. kappa: a strictly positive parameter
    Output: 1. A gamma random variable
"""
def __init__(self,theta=Symbol('theta',positive=True),
kappa=Symbol('kappa',positive=True)):
#x = Symbol('x', positive = True)
if theta.__class__.__name__!='Symbol':
if kappa.__class__.__name__!='Symbol':
if theta<=0 or kappa<=0:
err_string='both parameters must be positive'
raise RVError(err_string)
if theta in [-oo,oo] or kappa in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV((theta*(theta*x)**(kappa-1)*exp(-theta*x))/(gamma(kappa)),
[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[theta,kappa]
self.cache={}
class GeneralizedParetoRV(RV):
"""
Procedure Name: GeneralizedParetoRV
Purpose: Creates an instance of the generalized pareto distribution
Arguments: 1. theta: a strictly positive parameter
2. delta: a real valued parameter
3. kappa: a real valued parameter
Output: 1. A generalized pareto random variable
"""
def __init__(self,theta=Symbol('theta',positive=True),
delta=Symbol('delta'),kappa=Symbol('kappa')):
x = Symbol('x', positive = True)
if theta.__class__.__name__!='Symbol':
if theta<=0:
err_string='theta must be positive'
raise RVError(err_string)
if theta in [-oo,oo] or delta in [-oo,oo] or kappa in [-oo,oo]:
err_string='all parameters must be finite'
raise RVError(err_string)
X_dummy=RV((theta+kappa/(x+delta))*(1+x/delta)**(-kappa)*
exp(-theta*x),[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class GompertzRV(RV):
"""
Procedure Name: GompertzRV
Purpose: Creates an instance of the gompertz distribution
Arguments: 1. theta: a strictly positive parameter
2. kappa: a real valued parameter
Output: 1. A gompertz random variable
"""
def __init__(self,theta=Symbol('theta',positive=True),
kappa=Symbol('kappa')):
#x = Symbol('x', positive = True)
if theta.__class__.__name__!='Symbol':
if theta<=0:
err_string='theta must be positive'
raise RVError(err_string)
if theta in [-oo,oo] or kappa in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV([theta*kappa**(x)*exp(-(theta*(kappa**(x)-1))/ln(kappa))],
[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[theta,kappa]
self.cache={}
def variate(self,n=1,s=None,method='special'):
if param_check(self.parameter)==False:
raise RVError('Not all parameters specified')
# Check to see if the user specified a valid method
method_list=['special','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
idf_func=-((ln(self.parameter[0])-
ln(self.parameter[0]-ln(1-t)*ln(self.parameter[1])))
/ln(self.parameter[1]))
varlist=[]
for i in range(n):
if s==None:
val=random()
else:
val=s
var=idf_func.subs(t,val)
varlist.append(var)
return varlist
class IDBRV(RV):
"""
Procedure Name: IDBRV
Purpose: Creates an instance of the idb distribution
Arguments: 1. theta: a real valued parameter
2. delta: a real valued parameter
3. kappa: a real valued parameter
Output: 1. An idb random variable
"""
def __init__(self,theta=Symbol('theta'),delta=Symbol('delta'),
kappa=Symbol('kappa')):
#x = Symbol('x', positive = True)
if theta in [-oo,oo] or delta in [-oo,oo] or kappa in [-oo,oo]:
err_string='all parameters must be finite'
raise RVError(err_string)
X_dummy=RV(1-(1+kappa*x)**(-theta/kappa)*
exp(-delta*x**2/2),[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class InverseGaussianRV(RV):
"""
Procedure Name: InverseGaussianRV
Purpose: Creates an instance of the inverse gaussian distribution
Arguments: 1. theta: a strictly positive parameter
2. mu: a strictly positive parameter
Output: 1. An inverse gaussian random variable
"""
def __init__(self,theta=Symbol('theta',positive=True),
mu=Symbol('mu',positive=True)):
#x = Symbol('x', positive = True)
if theta.__class__.__name__!='Symbol':
if mu.__class__.__name__!='Symbol':
if theta<=0 or mu<=0:
err_string='both parameters must be positive'
raise RVError(err_string)
if theta in [-oo,oo] or mu in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV([(1/2)*sqrt(2)*sqrt(theta/(pi*x**3))*
exp(-(1/2)*(theta*(x-mu)**2)/(mu**(2)*x))],
[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class InverseGammaRV(RV):
"""
Procedure Name: InverseGammaRV
Purpose: Creates an instance of the inverse gamma distribution
Arguments: 1. alpha: a strictly positive parameter
2. beta: a strictly positive parameter
Output: 1. An inverse gamma random variable
"""
def __init__(self,alpha=Symbol('alpha',positive=True),
beta=Symbol('beta',positive=True)):
# x = Symbol('x', positive = True)
if alpha.__class__.__name__!='Symbol':
if beta.__class__.__name__!='Symbol':
if alpha<=0 or beta<=0:
err_string='both parameters must be positive'
raise RVError(err_string)
        if alpha in [-oo,oo] or beta in [-oo,oo]:
            err_string='both parameters must be finite'
            raise RVError(err_string)
        X_dummy=RV([(x**(-alpha-1)*exp(-1/(x*beta)))/
                    (gamma(alpha)*beta**(alpha))],
[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class KSRV(RV):
"""
Procedure Name: KSRV
    Purpose: Creates an instance of the Kolmogorov-Smirnov distribution
    Arguments: 1. n: a positive integer parameter
    Output: 1. A Kolmogorov-Smirnov random variable
"""
def __init__(self,n=Symbol('n',positive=True,
integer=True)):
        if n.__class__.__name__!='Symbol':
            if n<=0 or type(n)!=int:
                err_string='n must be a positive integer'
                raise RVError(err_string)
#Phase 1
N=n
m=floor(3*N/2)+(N%2)-1
vv=range(m+1)
vvalue=[]
for i in range(len(vv)):
vvalue.append(0)
vv=dict(zip(vv,vvalue))
vv[0]=0
g=1/(2*N)
mm=0
for i in range(1,N):
mm+=1
vv[mm]=i*g
for j in range(2*floor(N/2)+1,2*N,2):
mm+=1
vv[mm]=j*g
#Phase 2
# Generate the c array
cidx=[];cval=[]
for k in range(1,m+1):
cidx.append(k)
cval.append((vv[k-1]+vv[k])/2)
c=dict(zip(cidx,cval))
# Generate the x array
xidx=[];xval=[]
for k in range(1,N+1):
xidx.append(k)
xval.append((2*k-1)*g)
x=dict(zip(xidx,xval))
# Generate an NxN A array
aidx=range(1,N+1);aval=[]
for i in aidx:
aval.append(0)
        A=dict(zip(aidx,aval))
        for i in aidx:
            A[i]=dict(zip(aidx,aval))
# Insert values into the A array
for i in range(2,N+1):
for j in range(1,i):
A[i][j]=0
for k in range(1,m+1):
for i in range(1,N+1):
for j in range(i,N+1):
A[i][j]=0
z=max(floor(N*c[k]-1/2),0)
l=min(floor(2*N*c[k])+1,N)
for i in range(1,N+1):
for j in range(max(i,z+1),min(N,i+l-1)+1):
A[i][j]=1
# Create a 1xm P array
Pidx=[];Pval=[]
for i in range(1,m+1):
Pidx.append(i)
Pval.append(0)
P=dict(zip(Pidx,Pval))
# Create an NxN F array
fidx=range(1,N+1);fval=[]
for i in fidx:
fval.append(0)
        F=dict(zip(fidx,fval))
        for i in fidx:
            F[i]=dict(zip(fidx,fval))
# Create an NxN V array
vidx=range(1,N+1);vval=[]
for i in vidx:
vval.append(0)
        V=dict(zip(vidx,vval))
        for i in vidx:
            V[i]=dict(zip(vidx,vval))
# Create a list of indexed u variables
varstring='u:'+str(N+1)
u=symbols(varstring)
for k in range(2,m+1):
z=int(max(floor(N*c[k]-1/2),0))
l=int(min(floor(2*N*c[k])+1,N))
F[N][N]=integrate(1,(u[N],x[N]-v,1))
V[N][N]=integrate(1,(u[N],u[N-1],1))
for i in range(N-1,1,-1):
if i+l>N:
S=0
else:
S=F[i+1][i+l]
if i+l>N+1:
F[i][N]=integrate(V[i+1][N],
(u[i],x[N]-v,floor(x[i]+c[k])))
V[i][N]=integrate(V[i+1][N],
(u[i],u[i-1],floor(x[i]+c[k])))
if i+l==N+1:
F[i][N]=integrate(V[i+1][N],
(u[i],x[N]-v,x[i]+v))
if i+l<N+1:
F[i][i+l-1]=integrate(V[i+1][i+l-1]+S,
(u[i],x[N]-v,x[i]+v))
S+=F[i+1][min(i+l-1,N)]
for j in range(min(N-1,i+l-2),max(i+1-1,z+2-1),-1):
F[i][j]=integrate(V[i+1][j]+S,
(u[i],x[j]-v,x[j+1]-v))
V[i][j]=integrate(V[i+1][j]+S,
(u[i],u[i-1],x[j+1]-v))
S+=F[i+1][j]
if z+1<=i:
V[i][i]=integrate(S,(u[i],u[i-1],x[i+1]-v))
if z+1>i:
V[i][z+1]=integrate(V[i+1][z+1]+S,
(u[i],u[i-1],x[z+2]-v))
if z+1<i:
F[i][i]=integrate(S,(u[i],x[i]-v,x[i+1]-v))
if l==N:
S=0
F[1][N] = integrate(V[2][N],(u[1],x[N]-v,x[1]+v))
else:
S=F[2][l+1]
if l<N:
F[1][l]=integrate(V[2][l]+S,(u[1],x[l]-v,x[1]+v))
S+=F[2][j]
for j in range(min(N-1,i+l-2),max(i,z+1),-1):
F[1][j]=integrate(V[2][j]+S,
(u[1],(x[j]-v)*(floor(x[j]-c[k])+1),
x[j+1]-v))
S+=F[2][j]
if z==0:
F[1][1]=integrate(S,(u[1],0,x[2]-v))
P[k]=0
for j in range(z+1,l+1):
P[k]+=F[1][j]
P[k]=factorial(N)*P[k]
# Create the support and function list for the KSRV
KSspt=[];KSCDF=[]
x=Symbol('x')
for i in range(0,m+1):
KSspt.append(vv[i]+1/(2*N))
for i in range(1,m+1):
func = P[i]
if type(func) in [int,float]:
ksfunc = func
else:
ksfunc = func.subs(v, (x-1/(2*N)))
KSCDF.append(simplify(ksfunc))
# Remove redundant elements from the list
KSCDF2=[];KSspt2=[]
KSspt2.append(KSspt[0])
KSCDF2.append(KSCDF[0])
for i in range(1,len(KSCDF)):
if KSCDF[i]!=KSCDF[i-1]:
KSCDF2.append(KSCDF[i])
KSspt2.append(KSspt[i])
KSspt2.append(KSspt[-1])
X_dummy=RV(KSCDF,KSspt,['continuous','cdf'])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class LaPlaceRV(RV):
"""
Procedure Name: LaPlaceRV
Purpose: Creates an instance of the LaPlace distribution
Arguments: 1. omega: a strictly positive parameter
2. theta: a real valued parameter
Output: 1. A LaPlace random variable
"""
def __init__(self,omega=Symbol('omega',positive=True),
theta=Symbol('theta')):
        if omega.__class__.__name__!='Symbol':
            if omega<=0:
                err_string='omega must be positive'
                raise RVError(err_string)
        if omega in [-oo,oo] or theta in [-oo,oo]:
            err_string='both parameters must be finite'
            raise RVError(err_string)
X_dummy=RV(exp(-abs(x-theta)/omega)/(2*omega),[-oo,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class LogGammaRV(RV):
"""
Procedure Name: LogGammaRV
Purpose: Creates an instance of the log gamma distribution
Arguments: 1. alpha: a strictly positive parameter
2. beta: a strictly positive parameter
Output: 1. A log gamma random variable
"""
def __init__(self,alpha=Symbol('alpha',positive=True),
beta=Symbol('beta',positive=True)):
if alpha.__class__.__name__!='Symbol':
if beta.__class__.__name__!='Symbol':
if alpha<=0 or beta<=0:
err_string='both parameters must be positive'
raise RVError(err_string)
if alpha in [-oo,oo] or beta in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV([(exp(x*beta)*exp(-exp(x)/alpha))/
(alpha**(beta)*gamma(beta))],[-oo,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class LogisticRV(RV):
"""
Procedure Name: LogisticRV
Purpose: Creates an instance of the logistic distribution
Arguments: 1. kappa: a strictly positive parameter
2. theta: a strictly positive parameter
Output: 1. A logistic random variable
"""
def __init__(self,kappa=Symbol('kappa',positive=True),
theta=Symbol('theta',positive=True)):
if kappa.__class__.__name__!='Symbol':
if theta.__class__.__name__!='Symbol':
if kappa<=0 or theta<=0:
err_string='both parameters must be positive'
raise RVError(err_string)
if kappa in [-oo,oo] or theta in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV([(theta**(kappa)*kappa*exp(kappa*x))/
(1+(theta*exp(x))**kappa)**2],[-oo,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[kappa,theta]
self.cache={}
def variate(self,n=1,s=None,method='special'):
if param_check(self.parameter)==False:
raise RVError('Not all parameters specified')
# Check to see if the user specified a valid method
method_list=['special','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
        idf_func=((ln(-t/(t-1))-self.parameter[0]*ln(self.parameter[1]))/
                   self.parameter[0])
varlist=[]
for i in range(n):
if s==None:
val=random()
else:
val=s
var=idf_func.subs(t,val)
varlist.append(var)
return varlist
class LogLogisticRV(RV):
"""
Procedure Name: LogLogisticRV
Purpose: Creates an instance of the log logistic distribution
Arguments: 1. theta: a strictly positive parameter
2. kappa: a strictly positive parameter
    Output: 1. A log logistic random variable
"""
def __init__(self,theta=Symbol('theta',positive=True),
kappa=Symbol('kappa',positive=True)):
#x = Symbol('x', positive = True)
        if kappa.__class__.__name__!='Symbol':
            if theta.__class__.__name__!='Symbol':
                if theta<=0 or kappa<=0:
                    err_string='both parameters must be positive'
                    raise RVError(err_string)
if theta in [-oo,oo] or kappa in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV([(theta*kappa*(theta*x)**(kappa-1))/
(1+(theta*x)**(kappa))**2],[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[theta,kappa]
self.cache={}
def variate(self,n=1,s=None,method='special'):
if param_check(self.parameter)==False:
raise RVError('Not all parameters specified')
# Check to see if the user specified a valid method
method_list=['special','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
idf_func=exp((ln(-t/(t-1))-self.parameter[1]*ln(self.parameter[0]))/
self.parameter[1])
varlist=[]
for i in range(n):
if s==None:
val=random()
else:
val=s
var=idf_func.subs(t,val)
varlist.append(var)
return varlist
class LogNormalRV(RV):
"""
Procedure Name: LogNormalRV
Purpose: Creates an instance of the log normal distribution
Arguments: 1. mu: a real valued parameter
2. sigma: a strictly positive parameter
Output: 1. A log normal random variable
"""
def __init__(self,mu=Symbol('mu'),
sigma=Symbol('sigma',positive=True)):
#x = Symbol('x', positive = True)
if sigma.__class__.__name__!='Symbol':
if sigma<=0:
err_string='sigma must be positive'
raise RVError(err_string)
if mu in [-oo,oo] or sigma in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV([(1/2)*(sqrt(2)*exp((-1/2)*((ln(x)-mu)**2)/(sigma**2)))/
(sqrt(pi)*x*sigma)],[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class LomaxRV(RV):
"""
Procedure Name: LomaxRV
Purpose: Creates an instance of the lomax distribution
Arguments: 1. kappa: a strictly positive parameter
2. theta: a strictly positive parameter
Output: 1. A lomax random variable
"""
def __init__(self,kappa=Symbol('kappa',positive=True),
theta=Symbol('theta',positive=True)):
#x = Symbol('x', positive = True)
if kappa.__class__.__name__!='Symbol':
if theta.__class__.__name__!='Symbol':
if kappa<=0 or theta<=0:
err_string='both parameters must be positive'
raise RVError(err_string)
if kappa in [-oo,oo] or theta in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV([theta*kappa*(1+theta*x)**(-kappa-1)],[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
        self.parameter=[kappa,theta]
self.cache={}
def variate(self,n=1,s=None,method='special'):
if param_check(self.parameter)==False:
raise RVError('Not all parameters specified')
# Check to see if the user specified a valid method
method_list=['special','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
        idf_func=((1-t)**(-1/self.parameter[0])-1)/self.parameter[1]
varlist=[]
for i in range(n):
if s==None:
val=random()
else:
val=s
var=idf_func.subs(t,val)
varlist.append(var)
return varlist
class MakehamRV(RV):
"""
Procedure Name: MakehamRV
Purpose: Creates an instance of the Makeham distribution
Arguments: 1. theta: a strictly positive parameter
2. delta: a strictly positive parameter
               3. kappa: a strictly positive parameter
    Output: 1. A Makeham random variable
"""
def __init__(self,theta=Symbol('theta',positive=True),
delta=Symbol('delta',positive=True),
kappa=Symbol('kappa')):
#x = Symbol('x', positive = True)
if theta.__class__.__name__!='Symbol':
if delta.__class__.__name__!='Symbol':
if theta<=0 or delta<=0:
                    err_string='theta and delta must be positive'
raise RVError(err_string)
if theta in [-oo,oo] or delta in [-oo,oo] or kappa in [-oo,oo]:
err_string='all parameters must be finite'
raise RVError(err_string)
X_dummy=RV((theta+delta*kappa**x)*
exp(-theta*x-delta*(kappa**x-1)/log(kappa)),[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class MuthRV(RV):
"""
Procedure Name: MuthRV
Purpose: Creates an instance of the Muth distribution
Arguments: 1. kappa: a strictly positive parameter
    Output: 1. A Muth random variable
"""
def __init__(self,kappa=Symbol('kappa',positive=True)):
# x = Symbol('x', positive = True)
if kappa.__class__.__name__!='Symbol':
if kappa<=0:
err_string='kappa must be positive'
raise RVError(err_string)
if kappa in [-oo,oo]:
err_string='kappa must be finite'
raise RVError(err_string)
X_dummy=RV([(exp(kappa*x)-kappa)*exp((-exp(kappa*x)/kappa)+
kappa*x+(1/kappa))],
[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class NormalRV(RV):
"""
Procedure Name: NormalRV
Purpose: Creates an instance of the normal distribution
Arguments: 1. mu: a real valued parameter
2. sigma: a strictly positive parameter
Output: 1. A normal random variable
"""
def __init__(self,mu=Symbol('mu'),
sigma=Symbol('sigma',positive=True)):
if sigma.__class__.__name__!='Symbol':
if sigma<=0:
err_string='sigma must be positive'
raise RVError(err_string)
if sigma in [-oo,oo] or mu in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV((exp((-(x-mu)**2)/(2*sigma**2))*sqrt(2))/(2*sigma*sqrt(pi))
,[-oo,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[mu,sigma]
self.cache={}
def variate(self,n=1,s=None,method='special'):
# If no parameter is specified, return an error
if param_check(self.parameter)==False:
raise RVError('Not all parameters specified')
# Check to see if the user specified a valid method
method_list=['special','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
if s != None and n == 1:
return [IDF(self,s)]
# Otherwise, use the Box-Muller method to compute variates
mean=self.parameter[0];var=self.parameter[1]
U=UniformRV(0,1)
Z1=lambda (val1,val2): sqrt(-2*ln(val1))*cos(2*pi*val2).evalf()
gen_uniform=lambda x: U.variate(n=1)[0]
val_pairs=[(gen_uniform(1),gen_uniform(1)) for i in range(1,n+1)]
varlist=[Z1(pair) for pair in val_pairs]
normlist=[(mean+sqrt(var)*val).evalf() for val in varlist]
return normlist
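# Example (minimal sketch): standard normal variates generated with the
# Box-Muller transform used in NormalRV.variate.
#   Z = NormalRV(0, 1)
#   Z.variate(n=4)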
class ParetoRV(RV):
"""
Procedure Name: ParetoRV
Purpose: Creates an instance of the pareto distribution
Arguments: 1. theta: a strictly positive parameter
2. kappa: a strictly positive parameter
    Output: 1. A Pareto random variable
"""
def __init__(self,theta=Symbol('theta',positive=True),
kappa=Symbol('kappa',positive=True)):
#x = Symbol('x', positive = True)
if theta.__class__.__name__!='Symbol':
if kappa.__class__.__name__!='Symbol':
if theta<=0 or kappa<=0:
err_string='both parameters must be positive'
raise RVError(err_string)
if theta in [-oo,oo] or kappa in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV([(kappa*theta**(kappa))/(x**(kappa+1))],[theta,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class RayleighRV(RV):
"""
Procedure Name: RayleighRV
Purpose: Creates an instance of the Rayleigh distribution
Arguments: 1. theta: a strictly positive parameter
    Output: 1. A Rayleigh random variable
"""
def __init__(self,theta=Symbol('theta',positive=True)):
#x = Symbol('x', positive = True)
        if theta.__class__.__name__!='Symbol':
            if theta<=0:
                err_string='theta must be positive'
                raise RVError(err_string)
        if theta in [-oo,oo]:
            err_string='theta must be finite'
            raise RVError(err_string)
        X_dummy=RV([2*theta**(2)*x*exp(-theta**(2)*x**2)],[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class TriangularRV(RV):
"""
Procedure Name: TriangularRV
Purpose: Creates an instance of the triangular distribution
Arguments: 1. a: a real valued parameter
2. b: a real valued parameter
3. c: a real valued parameter
** Note: a<b<c ***
Output: 1. A triangular variable
"""
def __init__(self,a=Symbol('a'),b=Symbol('b'),c=Symbol('c')):
if a.__class__.__name__!='Symbol':
if b.__class__.__name__!='Symbol':
if c.__class__.__name__!='Symbol':
if a>=b or b>=c or a>=c:
err_string='the parameters must be in ascending order'
raise RVError(err_string)
if a in [-oo,oo] or b in [-oo,oo] or c in [-oo,oo]:
err_string='all parameters must be finite'
raise RVError(err_string)
X_dummy=RV([(2*(x-a))/((c-a)*(b-a)),(2*(c-x))/((c-a)*(c-b))],[a,b,c])
self.func=X_dummy.func
a=Symbol('a');b=Symbol('b');c=Symbol('c')
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[a,b,c]
self.cache={}
class TRV(RV):
"""
Procedure Name: TRV
Purpose: Creates an instance of the t distribution
Arguments: 1. N: a positive integer parameter
    Output: 1. A t random variable
"""
    def __init__(self,N=Symbol('N',positive=True,integer=True)):
if N.__class__.__name__!='Symbol':
            if N<=0 or type(N)!=int:
                err_string='N must be a positive integer'
                raise RVError(err_string)
X_dummy=RV([(gamma(N/2+1/2)*(1+((x**2)/N))**(-(N/2)-1/2))/
(sqrt(N*pi)*gamma(N/2))],[-oo,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class UniformRV(RV):
"""
Procedure Name: UniformRV
Purpose: Creates an instance of the uniform distribution
Arguments: 1. a: a real valued parameter
2. b: a real valued parameter
** Note: b>a **
Output: 1. A uniform random variable
"""
def __init__(self,a=Symbol('a'),b=Symbol('b')):
if a.__class__.__name__!='Symbol':
if b.__class__.__name__!='Symbol':
if a>=b:
err_string='the parameters must be in ascending order'
raise RVError(err_string)
if a in [-oo,oo] or b in [-oo,oo]:
err_string='all parameters must be finite'
raise RVError(err_string)
X_dummy=RV(simplify((b-a)**(-1)),[a,b])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[a,b]
self.cache={}
def variate(self,n=1,s=None,method='special'):
# If no parameter is specified, return an error
if param_check(self.parameter)==False:
raise RVError('Not all parameters specified')
# Check to see if the user specified a valid method
method_list=['special','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
# Generate uniform variates
idf_func=-t*self.parameter[0]+t*self.parameter[1]+self.parameter[0]
varlist=[]
for i in range(n):
if s==None:
val=random()
else:
val=s
var=idf_func.subs(t,val)
varlist.append(var)
varlist.sort()
return varlist
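# Illustrative usage sketch (not part of the original APPL source); the
# parameter values below are hypothetical:
#
#   U = UniformRV(2, 5)
#   U.variate(n=4)         # four Uniform(2,5) variates, returned sorted
#   U.variate(n=1, s=0.5)  # evaluates the inverse CDF at s=0.5, i.e. 3.5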
class WeibullRV(RV):
"""
Procedure Name: WeibullRV
Purpose: Creates an instance of the weibull distribution
Arguments: 1. theta: a strictly positive parameter
2. kappa: a strictly positive parameter
Output: 1. A weibull random variable
"""
def __init__(self,theta=Symbol('theta',positive=True),
kappa=Symbol('kappa',positive=True)):
#x = Symbol('x', positive = True)
if theta.__class__.__name__!='Symbol':
if kappa.__class__.__name__!='Symbol':
if theta<=0 or kappa<=0:
err_string='both parameters must be positive'
raise RVError(err_string)
if theta in [-oo,oo] or kappa in [-oo,oo]:
err_string='both parameters must be finite'
raise RVError(err_string)
X_dummy=RV(kappa*theta**(kappa)*x**(kappa-1)*exp(-(theta*x)**kappa),
[0,oo])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.parameter=[theta,kappa]
self.cdf = 1 - exp( - (x*theta)**kappa)
self.cache={}
def variate(self,n=1,s=None,method='special'):
# If no parameter is specified, return an error
if param_check(self.parameter)==False:
raise RVError('Not all parameters specified')
# Check to see if the user specified a valid method
method_list=['special','inverse']
if method not in method_list:
error_string='an invalid method was specified'
raise RVError(error_string)
# If the inverse method is specified, compute variates using
# the IDF function
if method=='inverse':
Xidf=IDF(self)
varlist=[IDF(Xidf,random()) for i in range(1,n+1)]
return varlist
# Generate weibull variates
idf_func=exp(-(-ln(-ln(1-t))+self.parameter[1]*ln(self.parameter[0]))/
self.parameter[1])
varlist=[]
for i in range(n):
if s==None:
val=random()
else:
val=s
var=idf_func.subs(t,val)
varlist.append(var)
varlist.sort()
return varlist
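# Illustrative usage sketch (not part of the original APPL source); the
# parameter values below are hypothetical:
#
#   W = WeibullRV(2, 3)    # theta=2, kappa=3
#   W.variate(n=5)         # five Weibull variates via the closed-form inverse CDF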
"""
Discrete Distributions
"""
class BenfordRV(RV):
"""
Procedure Name: BenfordRV
Purpose: Creates an instance of the Benford distribution
Arguments: 1. None
Output: 1. A Benford random variable
"""
def __init__(self):
X_dummy=RV([(ln((1/x)+1))/(ln(10))],[1,9],['Discrete','pdf'])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class BinomialRV(RV):
"""
Procedure Name: BinomialRV
Purpose: Creates an instance of the binomial distribution
Arguments: 1. N: a positive integer parameter
2. p: a positive parameter between 0 and 1
Output: 1. A binomial random variable
"""
def __init__(self,N=Symbol('N',positive=True,integer=True),
p=Symbol('p',positive=True)):
if N.__class__.__name__!='Symbol':
            if N<=0 or type(N)!=int:
                err_string='N must be a positive integer'
                raise RVError(err_string)
if p.__class__.__name__!='Symbol':
if p<=0 or p>=1:
err_string='p must be between 0 and 1'
raise RVError(err_string)
X_dummy=RV([(factorial(N)*p**(x)*(1-p)**(N-x))/
(factorial(N-x)*factorial(x))],[0,N],
['Discrete','pdf'])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class BernoulliRV(BinomialRV):
"""
Procedure Name: BernoulliRV
Purpose: Creates an instance of the bernoulli distribution
Arguments: 1. p: a positive parameter between 0 and 1
Output: 1. A bernoulli random variable
"""
def __init__(self,p=Symbol('p',positive=True)):
X_dummy = BinomialRV(1,p)
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
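# Illustrative usage sketch (not part of the original APPL source); the
# parameter values below are hypothetical:
#
#   X = BinomialRV(10, Rational(1, 4))   # 10 trials, success probability 1/4
#   Y = BernoulliRV(Rational(1, 4))      # one trial, built as BinomialRV(1, p)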
class GeometricRV(RV):
"""
Procedure Name: GeometricRV
Purpose: Creates an instance of the geometric distribution
Arguments: 1. p: a positive parameter between 0 and 1
Output: 1. A geometric random variable
"""
def __init__(self,p=Symbol('p',positive=True)):
if p.__class__.__name__!='Symbol':
if p<=0 or p>=1:
err_string='p must be between 0 and 1'
raise RVError(err_string)
X_dummy=RV([p*(1-p)**(x-1)],[1,oo],['Discrete','pdf'])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class PoissonRV(RV):
"""
Procedure Name: PoissonRV
Purpose: Creates an instance of the poisson distribution
Arguments: 1. theta: a strictly positive parameter
Output: 1. A poisson random variable
"""
def __init__(self,theta=Symbol('theta',positive=True)):
if theta.__class__.__name__!='Symbol':
if theta<=0:
err_string='theta must be positive'
raise RVError(err_string)
if theta in [-oo,oo]:
err_string='theta must be finite'
raise RVError(err_string)
X_dummy=RV([(theta**(x)*exp(-theta))/factorial(x)],
[0,oo],['Discrete','pdf'])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
class UniformDiscreteRV(RV):
"""
Procedure Name: UniformDiscreteRV
Purpose: Creates an instance of the uniform discrete distribution
    Arguments: 1. a: the beginning point of the interval
2. b: the end point of the interval (note: b>a)
Output: 1. A uniform discrete random variable
"""
def __init__(self,a=Symbol('a'),b=Symbol('b'),k=1):
if b<=a:
            err_string='b must be greater than a'
raise RVError(err_string)
if (b-a)%k != 0:
            err_string='(b-a) must be divisible by k'
raise RVError(err_string)
n = int((b-a)/k)
X_dummy=RV([Rational(1,n+1) for i in range(1,n+2)],
[a+i*k for i in range(n+1)], ['discrete','pdf'])
self.func=X_dummy.func
self.support=X_dummy.support
self.ftype=X_dummy.ftype
self.cache={}
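# Illustrative usage sketch (not part of the original APPL source); the
# parameter values below are hypothetical:
#
#   D = UniformDiscreteRV(1, 6)       # a fair six-sided die: support {1,...,6}
#   D2 = UniformDiscreteRV(0, 10, 2)  # equally likely values 0,2,4,6,8,10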
"""
Bivariate Distributions
"""
class BivariateNormalRV(BivariateRV):
"""
Procedure Name: BivariateNormalRV
Purpose: Creates an instance of the bivariate normal distribution
    Arguments: 1. mu: a real valued parameter
2. sigma1: a strictly positive parameter
3. sigma2: a strictly positive parameter
4. rho: a parameter >=0 and <=1
Output: 1. A bivariate normal random variable
"""
def __init__(self,mu=Symbol('mu'),sigma1=Symbol('sigma1',positive=True),
sigma2=Symbol('sigma2',positive=True),rho=Symbol('rho')):
if rho.__class__.__name__!='Symbol':
if rho<0 or rho>1 :
err_string='rho must be >=0 and <=1'
raise RVError(err_string)
if sigma1.__class__.__name__!='Symbol':
if sigma1<=0:
err_string='sigma1 must be positive'
raise RVError(err_string)
if sigma2.__class__.__name__!='Symbol':
if sigma2<=0:
err_string='sigma2 must be positive'
raise RVError(err_string)
pdf_func=((1/(2*pi*sigma1*sigma2*sqrt(1-rho**2)))*
exp(-mu/(2*(1-rho**2))))
X_dummy=BivariateRV([pdf_func],[[oo],[oo]],['continuous','pdf'])
self.func=X_dummy.func
self.constraints=X_dummy.constraints
self.ftype=X_dummy.ftype
class ExampleRV(BivariateRV):
def __init__(self):
X_dummy=BivariateRV([(21/4)*x**2*y],[[1-y,y-sqrt(x)]]
,['continuous','pdf'])
self.func=X_dummy.func
self.constraints=X_dummy.constraints
self.ftype=X_dummy.ftype | APPLPy | /APPLPy-0.4.12.tar.gz/APPLPy-0.4.12/applpy/dist_type.py | dist_type.py |
from __future__ import division
from sympy import (Symbol, symbols, oo, integrate, summation, diff,
exp, pi, sqrt, factorial, ln, floor, simplify,
solve, nan, Add, Mul, Integer, function,
binomial, pprint,log,expand,zoo,latex,Piecewise,Rational,
Sum,S,Float)
from .rv import (RV, RVError, CDF, CHF, HF, IDF, IDF, PDF, SF,
BootstrapRV, Convert)
from sympy.plotting.plot import plot
from random import random
import numpy as np
import pandas as pd
import pylab as pyplt
x,y,z,t=symbols('x y z t')
"""
A Probability Programming Language (APPL) -- Python Edition
Copyright (C) 2001,2002,2008,2010,2014 Andrew Glen, Larry
Leemis, Diane Evans, Matthew Robinson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
class StochError(Exception):
"""
Stoch Error Class
    Defines custom error messages for exceptions relating
to stochastic processes
"""
def __init__(self,value):
self.value=value
def __str__(self):
return repr(self.value)
class MarkovChain:
"""
Markov Chain Class
Defines the data structure for APPLPy Markov Chains
Defines procedures relating to APPLPy Markov Chains
"""
def __init__(self,P,init=None,states=None):
"""
Procedure Name: __init__
Purpose: Initializes an instance of the Markov Chain class
Arguments: 1. P: the transition matrix of the markov chain
2. init: the initial distribution for the markov chain
if the initial distribution is entered as a row
vector, it will be transposed into a column vector
(for more convenient and flexible input)
        Output:     1. An instance of the Markov Chain class
"""
# Check to ensure that the transition probability matrix is entered
# as an array or as a list. If it is not, raise an error
if type(P) != np.ndarray:
if type(P) != list:
err_string = 'The transition probability matrix must '
err_string += 'be entered as a list of lists or as a '
err_string += 'numpy array'
raise StochError(err_string)
else:
P = np.array(P)
        # Optionally set up the markov process to recognize names for the
# states in the state space
self.state_space = None
if states != None:
# If the number of states does not equal the dimension of the
# transition matrix, return an error
if len(states) != P.shape[0]:
err_string = 'The number of states in the state space '
err_string += 'must be equal to the dimensions of the '
err_string += 'transition probability matrix'
raise StochError(err_string)
# Convert the state labels to strings, set the state space
# for the markov chain
state_space = [str(state_label) for state_label in states]
self.state_space = state_space
else:
state_space = range(P.shape[0])
self.state_space = state_space
self.index_dict = {}
for i, state in enumerate(self.state_space):
self.index_dict[state] = i
# Check to make sure that the transition probability matrix is a square
# matrix
if P.shape[0] != P.shape[1]:
err_string = 'The transition probability matrix must be a'
err_string += ' square matrix'
raise StochError(err_string)
# Check to make sure each row in the transition probability matrix
# sums to 1
num_error=.000001
for i in range(P.shape[0]):
if sum(P[i])>1+num_error or sum(P[i])<1-num_error:
err_string = 'Each row in the transition probability matrix'
                err_string += ' must sum to one. '
row_id = 'Row %s does not sum to one.' % (str(i+1))
err_string += row_id
raise StochError(err_string)
self.P=P
self.P_print=self.matrix_convert(P)
# If an initial distribution is specified, check to make sure that it
# is entered as an array or list
if init != None:
if type(init) != np.ndarray:
if type(init) != list:
err_string = 'The initial distribution must '
err_string += 'be entered as a list or as a '
err_string += 'numpy array'
raise StochError(err_string)
else:
init=np.array(init)
# Check to make sure each the initial distribution sums to 1
num_error=.000001
if sum(init)>1+num_error or sum(init)<1-num_error:
err_string = 'The initial distribution must sum to one'
raise StochError(err_string)
self.init=init
self.init_print=self.vector_convert(init)
else:
self.init=None
self.init_print=None
# Initialize the state of the system to the initial distribution
self.state=init
self.state_print=self.init_print
self.steps=0
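    # Illustrative usage sketch (not part of the original source); the
    # transition matrix and state labels below are hypothetical:
    #
    #   P = [[0.9, 0.1],
    #        [0.5, 0.5]]
    #   X = MarkovChain(P, init=[1, 0], states=['sunny', 'rainy'])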
"""
Special Class Methods
Procedures:
1. __repr__(self)
"""
def __repr__(self):
"""
Procedure Name: __repr__
Purpose: Sets the default string display setting for the MC class
Arguments: 1. self: the markov chain
Output: 1. Print statements showing the transition probability
matrix and the initial state of the system
"""
return repr(self.display())
"""
Utility Class Methods
Procedures:
1. display(self)
2. matrix_convert(self,matrix)
3. vector_convert(self,vector)
"""
def display(self,option='trans mat',n=1,method='float'):
"""
Procedure Name: display
Purpose: Displays the markov process in an interactive environment
        Arguments: 1. self: the markov process
Output: 1. The transition probability matrix
2. The initial state of the system
3. The current state of the system
"""
        option_list = ['trans mat','steady state','init']
if option not in option_list:
options = ''
for option_type in option_list:
options += option_type+', '
err_string = 'Invalid option. Valid options are: '
err_string += options
raise StochError(err_string)
if option == 'trans mat':
# Check to make sure that the number of steps is an integer value
if (type(n) != int) and (n.__class__.__name__!='Symbol'):
err_string = 'The number of steps in a discrete'
                err_string += ' time markov chain must be an integer value'
raise StochError(err_string)
if n == 1:
print 'The transition probability matrix:'
print self.P_print
else:
print 'The transition probability matrix after %s steps:'%(n)
print self.matrix_convert(self.trans_mat(n,method=method))
print '----------------------------------------'
print 'The initial system state:'
print self.init_print
if option == 'steady state':
print 'The steady state probabilities are:'
print self.vector_convert(self.steady_state(method=method))
if option == 'init':
print 'The initial conditions are:'
print self.vector_convert(self.init)
def matrix_convert(self,matrix):
"""
Procedure Name: matrix_convert
Purpose: Converts matrices to pandas data frame so that they can
be displayed with state space labels
        Arguments: 1. self: the markov process
2. matrix: the matrix to be converted for display
Output: 1. The matrix in display format
"""
display_mat = pd.DataFrame(matrix, index=self.state_space,
columns = self.state_space)
return display_mat
def vector_convert(self,vector):
"""
Procedure Name: vector_convert
Purpose: Converts vectors to pandas data frame so that they can
be displayed with state space labels
        Arguments: 1. self: the markov process
2. vector: the vector to be converted for display
Output: 1. The vector in display format
"""
display_vec = pd.DataFrame(vector, index=self.state_space,
columns = ['Prob'])
return display_vec
"""
Functional Class Methods
Procedures:
1. absorption_prob(self,state)
2. absorption_steps(self)
3. classify_states(self)
4. long_run_prob(self, method)
5. probability(self,state,given,method)
6. reachability(self)
7. steady_state(self, method)
8. trans_mat(self,n,method)
"""
def absorption_prob(self,state):
"""
Procedure Name: absorption_prob
Purpose: Gives the probability of being absorbed into the specified
            state, given that the markov chain starts in each other state
Arguments: 1. state: the absorbing state of interest
Output: 1. A vector of probabilities
"""
if state not in self.state_space:
err_string = 'Specified state is not in the state space'
raise StochError(err_string)
trans_mat = self.P
size = np.size(trans_mat,axis=0)
B = self.reachability()
j = self.index_dict[state]
if sum(B[j,:]) != 1:
err_string = 'The specified state is not absorbing'
raise StochError(err_string)
P = list(symbols('P0:%d'%(size),positive=True))
# P[j] = 1 since state j is absorbing
P[j] = 1
# P[i] = 0 if state j is not reachable from i
for item in self.index_dict:
i = self.index_dict[item]
if (i != j) and (B[i,j] == False):
P[i] = 0
# Set up remaining unknowns
eqns = []
for i,unknown in enumerate(P):
if unknown.__class__.__name__=='Symbol':
lhs = np.dot(np.array(P),trans_mat[i])
new_eqn = unknown - lhs
eqns.append(new_eqn)
soln = solve(eqns)
for i, unknown in enumerate(P):
if unknown.__class__.__name__ == 'Symbol':
P[i] = soln[P[i]]
return P
def absorption_steps(self):
"""
Procedure Name: absorption_steps
        Purpose: Gives the expected number of steps until absorption,
given the initial state of the Markov chain
Arguments: 1. None
Output: 1. A vector of expected values
"""
trans_mat = self.P
size = np.size(trans_mat, axis=0)
B = self.reachability()
M = list(symbols('M0:%d'%(size),positive=True))
# If a state is absorbing, the number of steps to absorption is 0
for item in self.index_dict:
i = self.index_dict[item]
if sum(B[i,:]) == 1:
M[i] = 0
# Set up the remaining unknown equations and solve them using
# first step analysis
eqns = []
for i, unknown in enumerate(M):
if unknown.__class__.__name__ == 'Symbol':
lhs = 1 + np.dot(np.array(M),self.P[i,:])
new_eqn = unknown - lhs
eqns.append(new_eqn)
soln = solve(eqns)
for i, unknown in enumerate(M):
if unknown.__class__.__name__ == 'Symbol':
M[i] = soln[M[i]]
return M
def long_run_probs(self, method = 'float'):
"""
Procedure Name: long_run_probs
Purpose: Returns the long run fraction of time spent in state j,
given that the markov chain starts in state i
Arguments: 1. None
Output: 1. Matrix of probabilities
"""
if method not in ['float','rational']:
err_string = 'The method must be specified as float or rational.'
raise StochError(err_string)
trans_mat = self.P
size = np.size(trans_mat, axis=0)
self.classify_states()
B = self.reachability()
Pi = np.zeros(shape=(size,size))
if method == 'rational':
Pi = Pi.astype(object)
for i in range(size):
for j in range(size):
Pi[i,j] = Rational(Pi[i,j])
for item in self.index_dict:
i = self.index_dict[item]
# If a state is absorbing, then all time is spent in that
# state if the DTMC starts there
if sum(B[i,:]) == 1:
Pi[i,:] = [1 if j == i else 0 for j in range(size)]
abs_prob = self.absorption_prob(item)
Pi[:,i] = abs_prob
# Solve the problem independently for each recurrent class
for item in self.classify:
can_reach = 0
abs_flag = False
for state in self.classify[item]:
index = self.index_dict[state]
can_reach += sum(B[index,:])
if can_reach == len(self.classify[item]):
abs_flag = True
if item != 'Transient' and abs_flag == False:
states_rec = self.classify[item]
size_rec = len(states_rec)
P_rec = np.zeros(shape = (size_rec,size_rec))
if method == 'rational':
P_rec = P_rec.astype(object)
for i_rec, item1 in enumerate(states_rec):
for j_rec, item2 in enumerate(states_rec):
i = self.index_dict[item1]
j = self.index_dict[item2]
P_rec[i_rec,j_rec] = trans_mat[i,j]
X_rec = MarkovChain(P_rec, states = states_rec)
steady_rec = X_rec.steady_state(method = method)
for item in states_rec:
i_rec = X_rec.index_dict[str(item)]
j = self.index_dict[item]
for item in states_rec:
i = self.index_dict[item]
if method == 'float':
Pi[i,j] = steady_rec[i_rec][0]
elif method == 'rational':
Pi[i,j] = steady_rec[i_rec]
# If a state is transient, the long run proportion of
# time spent in each of these states is 0
for item in self.classify['Transient']:
i = self.index_dict[item]
# Find the probability of transition to any
# non-absorbing recurrent class that is reachable
# from the transient state
current_prob = sum(Pi[i,:])
trans_dict = {}
for equiv_class in self.classify:
if equiv_class != 'Transient':
can_reach = 0
total_prob = 0
abs_flag = False
for j,state in enumerate(self.classify[equiv_class]):
index = self.index_dict[state]
can_reach += sum(B[index,:])
total_prob = self.P[i,j]
if can_reach == len(self.classify[equiv_class]):
abs_flag = True
if abs_flag == False:
trans_dict[equiv_class]=total_prob
trans_sum = sum([trans_dict[key] for key in trans_dict])
for key in trans_dict:
trans_dict[key] /= trans_sum
for equiv_class in self.classify:
if equiv_class in trans_dict:
for state in self.classify[equiv_class]:
j = self.index_dict[state]
Piij = Pi[j,j] * trans_dict[equiv_class]
Pi[i,j] = Piij * current_prob
return Pi
def classify_states(self):
"""
Procedure Name: classify_states
Purpose: Classifies states in the state space as either transient
            or recurrent. Recurrent states are grouped together. The method
for classifying states is described in Weiss, B. 'A non-recursive
algorithm for classifying the states of a finite Markov chain'.
1987. European Journal of Operations Research.
Arguments: 1. None
Output: 1. A dictionary of states. If there are no transient states
self.reducible is set to False. Otherwise, it is
set to True.
"""
n = self.P.shape[0]
B = self.reachability()
# C[j] gives the number states from which j can be accessed
C = [0 for i in range(n)]
for j in range(n):
C[j] = sum(B[:,j])
# T[j] = 0 for transient states, > 0 for recurrent states. Recurrent
# states in the same equivalence class have the same value
T = np.array([0 for i in range(n)])
c = 0
Z = None
while True:
c += 1
Z = max(C)
if Z == 0:
break
K = [i for i in range(n) if C[i] == Z]
k = K[0]
for j in range(n):
if B[k,j] == True:
T[j] = c
if B[j,k] == True:
C[j] = 0
classes = ['Transient']
num_abs = 0
num_rec = 0
for i in range(1,c):
classes.append('Recurrent ' + str(i))
state_dict = {}
for k in range(c):
state_dict[classes[k]] = [self.state_space[i] for i in range(n)
if T[i] == k]
self.classify = state_dict
if len(self.classify['Transient']) == 0:
self.reducible = False
else:
self.reducible = True
return self.classify
def probability(self,states,given=None,method='float'):
"""
Procedure Name: probability
Purpose: Computes the probability of reaching a state, given that
another state has been realized
Arguments: 1. states: a list of tuples. The first entry in each
tuple is the time period when the state is realized
and the second entry is the state
2. given: an optional list of conditions, expressed as
tuples. When entered, the procedure conditions
the probability on these states
Output: 1. A probability
"""
# Check to make sure the states and conditional statements are
# in the proper form
for state in states:
if type(state) != tuple:
err_string = 'Each state must be entered as a tuple'
raise StochError(err_string)
if len(state) != 2:
err_string = 'Each state must be a tuple with two elements, '
err_string += 'the first is the time period and the second '
err_string += 'is the name of the state'
raise StochError(err_string)
if state[1] not in self.state_space:
err_string = 'A state was entered that does not appear '
err_string += 'in the state space of the Markov Chain'
raise StochError(err_string)
# If no conditions are given, check to make sure that initial
# conditions are specified
if given == None:
if type(self.init) != np.ndarray:
if self.init == None:
err_string = 'Unconditional probabilities can only be '
err_string += 'computed if initial conditions are '
err_string += 'specified.'
raise StochError(err_string)
# Make sure that the state for a time period is not specified
# more than once
states.sort()
states_specified = []
for i in range(len(states)-1):
states_specified.append(states[i])
if states[i][0] == states[i+1][0]:
err_string = 'Two different states were specified for '
err_string += 'the same time period'
raise StochError(err_string)
states_specified.append(states[-1])
if given != None:
given.sort()
for i in range(len(given)-1):
err_string = 'Two different states were specified '
                err_string += 'for the same time period'
if given[i][0] == given[i+1][0]:
raise StochError(err_string)
if given[i] in states_specified:
raise StochError(err_string)
if given[-1] in states_specified:
raise StochError(err_string)
# If no conditions are specified, compute the probability
if given == None:
prev_time = 0
prev_state = None
step_mat = {1:self.P_print}
init_states = self.init_print['Prob']
total_prob = 1
while len(states) > 0:
current_time = states[0][0]
current_state = states[0][1]
time_diff = current_time - prev_time
# If we haven't computed it yet, compute transition
# matrix using C-K equations. Store it in a dict
# so that it does not need to be computed again
if time_diff not in step_mat and time_diff != 0:
trans = self.trans_mat(n=time_diff,method=method)
step_mat[time_diff] = self.matrix_convert(trans)
# If this is the first iteration, condition on the
# distribution of the initial states
if prev_state == None:
if time_diff == 0:
total_prob *= init_states[current_state]
else:
init_prob = 0
for state in self.state_space:
prob_to_state = init_states[state]
p_n = step_mat[time_diff][current_state][state]
prob_to_state *= p_n
init_prob += prob_to_state
total_prob *= init_prob
# If this is not the first iteration, compute the
# transition probability
else:
total_prob *= step_mat[time_diff][current_state][prev_state]
prev_state = current_state
prev_time = current_time
del states[0]
# If conditions are specified, compute the probability
if type(given) == list:
if given[0][0] < states[0][0]:
shift = given[0][0]
total_states = given + states
for i,element in enumerate(total_states):
total_states[i] = (element[0]-shift,element[1])
init_prob = self.init_print['Prob'][given[0][0]]
total_prob = self.probability(states=total_states,
method=method)/init_prob
else:
total_states = given + states
total_prob = self.probability(states=total_states,
method=method)
return total_prob
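    # Illustrative usage sketch (not part of the original source), assuming a
    # chain X built with an initial distribution and states ['sunny','rainy']
    # (both hypothetical):
    #
    #   X.probability([(2, 'rainy')])                        # P(state 'rainy' at step 2)
    #   X.probability([(3, 'rainy')], given=[(1, 'sunny')])  # conditional version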
def reachability(self, method = 'float'):
"""
Procedure Name: reachability
Purpose: Computes boolean matrix B such that B[i][j]=1 if state
j can be reached from state i, 0 otherwise. The method
for computing B is described in Weiss, B. 'A non-recursive
algorithm for classifying the states of a finite Markov chain'.
1987. European Journal of Operations Research.
Arguments: 1. None
Output: 1. A boolean matrix
"""
trans_mat = self.P
n = trans_mat.shape[0]
Q = []
for row in trans_mat:
qrow = [x > 0 for x in row]
Q.append(qrow)
Q = np.array(Q,dtype=bool)
for i in range(n):
Q[i,i]=True
B = np.linalg.matrix_power(Q,n-1)
return B
def steady_state(self, method = 'float'):
"""
Procedure Name: steady_state
Purpose: Computes the long run fraction of time spent in state i
Arguments: 1. None
Output: 1. A vector containing the long run fraction of time
spent in state i
"""
# Need to add code to check to make sure that the markov chain
# is irreducible, aperiodic and positive recurrent
if method not in ['float', 'rational']:
raise StochError('Method must be either float or rational')
self.classify_states()
if self.reducible == True:
err_string = 'This Markov chain is reducible. The steady state '
err_string += 'only works for irreducible chains.'
raise StochError(err_string)
trans_mat = self.P
size = np.size(trans_mat,axis=0)
# The steady state probabilities are found by solving the following
        # system: Pj = sum over i of ( Pi*Pij ) for all j, and 1 = sum(Pj)
if method == 'float':
trans_mat_T = trans_mat.transpose()
A = trans_mat_T - np.identity(size)
B = np.vstack((A,np.array([1 for i in range(size)])))
B = B[1:,]
a = [0 for i in range(size)]
a[-1]=1
b = np.array(a).reshape(-1,1)
soln = np.dot(np.linalg.inv(B),b)
return soln
        # The steady state probabilities are computed by explicitly solving
# the system of equations using computer algebra
if method == 'rational':
a = symbols('a0:%d'%(size),positive=True)
eqns = []
norm_eqn = -1
for i in range(1,size):
current_eqn = 0
for j in range(size):
current_eqn += trans_mat[j][i]*a[j]
current_eqn -= a[i]
norm_eqn += a[i]
eqns.append(current_eqn)
eqns.append(norm_eqn+a[0])
solns = solve(eqns)
soln = [solns[a[i]] for i in range(size)]
return np.array(soln)
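    # Illustrative usage sketch (not part of the original source), assuming an
    # irreducible chain X (hypothetical):
    #
    #   X.steady_state()                   # numeric (float) steady state vector
    #   X.steady_state(method='rational')  # exact probabilities via sympy.solve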
def trans_mat(self,n=Symbol('n',positive=True),method='float'):
"""
Procedure Name: trans_mat
Purpose: Computes the state of the system after n steps
Arguments: 1. n: the number of steps the system takes forward
Output: 1. The transition probability matix for n steps
"""
# Check to make sure that the number of steps is an integer value
if (type(n) != int) and (n.__class__.__name__!='Symbol'):
err_string = 'The number of steps in a discrete time markov chain'
            err_string += ' must be an integer value'
raise StochError(err_string)
# Compute the transition probability transition matrix for n steps
# To efficiently compute powers of the matrix, this algorithm
# finds the eigen decomposition of the matrix, and then computes
# the power of the elements in the diagonal matrix
if method == 'float':
eigen = np.linalg.eig(self.P)
Dk = np.diag(eigen[0]**n)
T = eigen[1]
Tinv = np.linalg.inv(T)
Pk = np.dot(np.dot(T,Dk),Tinv)
return Pk
if method == 'rational':
Pk = self.P
for i in range(n-1):
Pk = np.dot(self.P,Pk)
return Pk
"""
Format Conversion Procedures:
1. matrix_display(matrix,states)
2. vector_display(vector,states)
"""
def matrix_display(matrix,states):
"""
    Procedure Name: matrix_display
    Purpose: Converts a matrix to a pandas data frame so that it can
                be displayed with state space labels
    Arguments: 1. matrix: the matrix to be converted for display
               2. states: the state space labels
    Output:    1. The matrix in display format
"""
display_mat = pd.DataFrame(matrix, index=states, columns = states)
return display_mat
def vector_display(vector,states):
"""
    Procedure Name: vector_display
    Purpose: Converts a vector to a pandas data frame so that it can
                be displayed with state space labels
    Arguments: 1. vector: the vector to be converted for display
               2. states: the state space labels
    Output:    1. The vector in display format
"""
display_vec = pd.DataFrame(vector, index=states, columns = ['Prob'])
return display_vec | APPLPy | /APPLPy-0.4.12.tar.gz/APPLPy-0.4.12/applpy/stoch.py | stoch.py |
from __future__ import division
from sympy import *
from .rv import *
from .stoch import *
from .appl_plot import *
from .dist_type import *
from .stats import *
from .bayes import *
from .queue_dist import *
from .bivariate import *
from .timeseries import *
x,y,z,t=symbols('x y z t')
k,m,n=symbols('k m n',integers=True)
f,g,h=symbols('f g h',cls=Function)
import sys
sys.displayhook=pprint
def Menu():
print '-----------------'
print 'Welcome to ApplPy'
print '-----------------'
print ''
print 'ApplPy Procedures'
print ""
print 'Procedure Notation'
print ""
print 'Capital letters are random variables'
    print 'Lower case letters are numbers'
print 'Greek letters are parameters'
print 'gX indicates a function'
print 'n and r are positive integers where n>=r'
print 'Square brackets [] denote a list'
    print 'Curly braces {} denote an optional variable'
print ""
print ""
print 'RV Class Procedures'
print 'X.variate(n,x),X.verifyPDF()'
print ""
print 'Functional Form Conversion'
print 'CDF(X,{x}),CHF(X,{x}),HF(X,{x}),IDF(X,{x})'
print 'PDF(X,{x}),SF(X,{x}),BootstrapRV([data])'
print 'Convert(X,{x})'
print ""
print 'Procedures on One Random Variable'
print 'ConvolutionIID(X,n),CoefOfVar(X),ExpectedValue(X,gx)'
print 'Kurtosis(X),MaximumIID(X,n),Mean(X),MGF(X)'
print 'MinimumIID(X,n),OrderStat(X,n,r),ProductIID(X,n)'
print 'Skewness(X),Transform(X,gX),Truncate(X,[x1,x2])'
print 'Variance(X)'
print ""
print 'Procedures on Two Random Variables'
print 'Convolution(X,Y),Maximum(X,Y),Minimum(X,Y)'
print 'Mixture([p1,p2],[X,Y]),Product(X,Y)'
print ""
print 'Statistics Procedures'
print 'KSTest(X,[sample]), MOM(X,[sample],[parameters])'
print 'MLE(X,[sample],[parameters],censor)'
print ""
print 'Utilities'
print 'PlotDist(X,{[x1,x2]}),PlotDisplay([plotlist],{[x1,x2]})'
print 'PPPlot(X,[sample]),QQPlot(X,[sample])'
print ""
print 'Continuous Distributions'
print 'ArcSinRV(),ArcTanRV(alpha,phi),BetaRV(alpha,beta)'
print 'CauchyRV(a,alpha),ChiRV(N),ChiSquareRV(N),ErlangRV(theta,N)'
print 'ErrorRV(mu,alpha,d),ErrorIIRV(a,b,c),ExponentialRV(theta)'
print 'ExponentialPowerRV(theta,kappa),ExtremeValueRV(alpha,beta)'
print 'FRV(n1,n2),GammaRV(theta,kappa),GompertzRV(theta,kappa)'
print 'GeneralizedParetoRV(theta,delta,kappa),IDBRV(theta,delta,kappa)'
print 'InverseGaussianRV(theta,mu),InverseGammaRV(alpha,beta)'
print 'KSRV(n),LaPlaceRV(omega,theta), LogGammaRV(alpha,beta)'
print 'LogisticRV(kappa,theta),LogLogisticRV(theta,kappa)'
print 'LogNormalRV(mu,sigma),LomaxRV(kappa,theta)'
print 'MakehamRV(theta,delta,kappa),MuthRV(kappa),NormalRV(mu,sigma)'
print 'ParetoRV(theta,kappa),RayleighRV(theta),TriangularRV(a,b,c)'
print 'TRV(N),UniformRV(a,b),WeibullRV(theta,kappa)'
print ""
print 'Discrete Distributions'
print 'BenfordRV(),BinomialRV(n,p),GeometricRV(p),PoissonRV(theta)'
print '' | APPLPy | /APPLPy-0.4.12.tar.gz/APPLPy-0.4.12/applpy/__init__.py | __init__.py |
from __future__ import division
from sympy import (Symbol, symbols, oo, integrate, summation, diff,
exp, pi, sqrt, factorial, ln, floor, simplify,
solve, nan, Add, Mul, Integer, function,
binomial)
from mpmath import (nsum,nprod)
from random import random
import numpy as np
from .rv import (RV, RVError, CDF, CHF, HF, IDF, IDF, PDF, SF,
BootstrapRV, Convert, Mean, Convolution, Mixture)
from .dist_type import (ErlangRV, ExponentialRV)
x,y,z,t,v=symbols('x y z t v')
"""
A Probability Programming Language (APPL) -- Python Edition
Copyright (C) 2001,2002,2008,2010,2014 Andrew Glen, Larry
Leemis, Diane Evans, Matthew Robinson, William Kaczynski
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
def QueueMenu():
print 'ApplPy Procedures'
print ""
print 'Procedure Notation'
print ""
print 'X is the distribution of the time between arrivals.'
print 'Y is the service time distribution.'
print 'n is the total number of customers in the system.'
    print 'k is the number of customers in the system at time 0'
print 's is the number of identical parallel servers'
print 'a is the first customer of interest'
print 'b is the second customer of interest (a<b)'
print ""
print ""
print 'Queue Procedures'
print 'Queue(X,Y,n,k,s), Cov(X,Y,a,b), kCov(X,Y,a,b,n,k)'
print ""
def Queue(X,Y,n,k=0,s=1):
"""
Procedure Name: Queue
Purpose: Computes the sojourn time distribution for the nth
customer in an M/M/s queue, given k customers are
in the system at time 0.
Arguments: 1. X: the distribution of the time between arrivals
(must be an ExponentialRV)
2. Y: the service time distribution
(must be an ExponentialRV)
3. n: the total number of customers in the system
4. k: the number of customers in the system at time 0
5. s: the number of identical parallel servers
Output: 1. Probability distribution for an M/M/s queue
"""
rho=Symbol('rho')
rho_subs=(1/Mean(X))/(s*(1/Mean(Y)))
lst=BuildDist(X,Y,n,k,s)
probs=MMSQprob(n,k,s)
# Substitute the value of rho into the probability list
sub_probs=[]
for element in probs:
sub_element=element.subs(rho,rho_subs)
sub_probs.append(sub_element)
TIS=Mixture(sub_probs,lst)
return TIS
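# Illustrative usage sketch (not part of the original source); the arrival
# and service rates below are hypothetical:
#
#   X = ExponentialRV(1)            # time between arrivals
#   Y = ExponentialRV(2)            # service times
#   T = Queue(X, Y, n=3, k=0, s=1)  # sojourn time distribution of customer 3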
'''
The following procedures are used to build the Queue, Cov and kCov
procedures. They are not intended for end use for the user. _Q
does not import into the APPLPy namespace to avoid conflict with
the sympy.assumptions procedure Q.
'''
"""
Queue Sub-Procedures:
1. BuildDist(X,Y,n,k,s)
1. MMSQprob(n,k,s)
2. _Q(n,i,k,s)
"""
def BuildDist(X,Y,n,k,s):
"""
Procedure Name: BuildDist
Purpose: Creates the appropriate conditional sojourn time
distribution for each case where a customer
arrives to find i=1 to i=n+k customers present
in an M/M/s queue with k customers initially present
Arguments: 1. X: the distribution of the time between arrivals
(must be an ExponentialRV)
2. Y: the service time distribution
3. n: the total number of customers in the system
               4. k: the number of customers in the system at time 0
5. s: the number of identical parallel servers
Output: 1. lst: the sojourn time distributions in segments
(a list of APPLPy random variables)
"""
# Raise an error if both either of the distributions are not
# exponential
if X.__class__.__name__!='ExponentialRV':
        err_string='both distributions in the queue must be '
        err_string+='exponential'
raise RVError(err_string)
if Y.__class__.__name__!='ExponentialRV':
        err_string='both distributions in the queue must be '
        err_string+='exponential'
raise RVError(err_string)
# Pre-compute the mean of y to avoid multiple integrations
meany=Mean(Y)
# Place positive assumptions on x to simplify output
x=Symbol('x',positive=True)
lst=[]
for i in range(1,n+k+1):
if s==1:
lst.append(ErlangRV(1/meany,i))
else:
if i<=s or s>n+k:
lst.append(Y)
else:
lst.append(Convolution(ErlangRV(s*(1/meany),i-s),Y))
return lst
def MMSQprob(n,k,s):
"""
Procedure Name: MMSQprob
Purpose: Computes Pk(n,i) for an M/M/s queue, which is the
probability that customer n will see i customers
in the system counting himself at time T_n with
k customers initially in the system at time 0.
Arguments: 1. n: The total number of customers in the system
2. k: The number of customers in the system at time 0
3. s: The number of parallel servers
Output: 1. Pk: A list of ordered probabilities
"""
lst=[]
for i in range(1,n+k+1):
lst.append(_Q(n,i,k,s))
return lst
def _Q(n,i,k,s):
"""
Procedure Name: _Q
Purpose: Computes the single probability Pk(n,i) for an M/M/s
queue recursively.
Arguments: 1. n: The total number of customers in the system
2. i: An integer value
3. k: The number of customers in the system at time 0
4. s: The number of parallel servers
Output: 1. Pk: A single probability for an M/M/s queue
"""
rho=Symbol('rho')
if k>=1 and i==k+n:
if k>=s:
p=(rho/(rho+1))**n
elif k+n<=s:
p=(rho**n)/(nprod(lambda j: rho+(k+j-1)/s,[1,n]))
elif k<s and s<k+n:
p=(rho**n)/((rho+1)**(n-s+k)*
(nprod(lambda j: rho+(k+j-1)/s,[1,s-k])))
if k==0 and i==n:
if n<=s:
p=(rho**n)/nprod(lambda j: rho+(j-1)/s,[1,n])
elif n>s:
p=(rho**n)/((rho+1)**(n-s)*
nprod(lambda j: rho+(j-1)/s,[1,s]))
if i==1:
p=1-nsum(lambda j: _Q(n,j,k,s),[2,n+k])
if k>=1 and i>=2 and i<=k and n==1:
if k<=s:
p=rho/(rho+(i-1)/s)*nprod(lambda j:
1-rho/(rho+(k-j+1)/s),[1,k-i+1])
elif k>s and i>s:
p=rho/(rho+1)**(k-i+2)
elif i<=s and s<k:
            p=rho/((rho+1)**(k-s+1)*(rho+(i-1)/s)*
                   nprod(lambda j: 1-rho/(rho+(s-j)/s),[1,s-i]))
if n>=2 and i>=2 and i<=k+n-1:
if i>s:
p=rho/(rho+1)*nsum(lambda j:
(1/(rho+1)**(j-i+1)*_Q(n-1,j,k,s)),
[i-1,k+n-1])
elif i<=s:
p=rho/(rho+(i-1)/s)*(
nsum(lambda j:
nprod(lambda h: 1-rho/(rho+(j-h+1)/s),[1,j-i+1])*
_Q(n-1,j,k,s),[i-1,s-1]) +
nprod(lambda h: 1-rho/(rho+(s-h)/s),[1,s-i]) *
nsum(lambda j: (1/(rho+1))**(j-s+1)*_Q(n-1,j,k,s),[s,k+n-1]))
return simplify(p)
"""
Cov/kCov Sub-Procedures:
1. cases(n)
2. caseprob(n,P,meanX,meanY)
3. Cprime(n,C)
4. ini(n)
5. kcases(n,k)
6. kcaseprob(n,k,P,meanX,meanY)
7. kpath(n,k,A)
8. kprobvec(n,k,meanX,meanY)
9. okay(n,E)
10. path(n,A)
11. probvec(n,meanX,meanY)
12. swapa(n,A)
13. swapb(n,B)
"""
'''
Re-check the code for cases, kcases, ini, swapa, swapb and okay for errors:
probvec does not sum to 1, and the error is most likely in one of those
procedures.
'''
def cases(n):
"""
Procedure Name: cases
Purpose: Generates all possible arrival/departure sequences for
n customers in an M/M/1 queue initially empty and
idle.
Arguments: 1. n: the total number of customers in the system
Output: 1. C: a list of sequences consisting of 1s and -1s,
where 1s represent an arrival and -1s
represent a departure
"""
# Compute the nth Catalan number
c=factorial(2*n)/factorial(n)/factorial(n+1)
C=np.zeros((c,2*n))
for i in range(c):
# Initialize the matrix C
if i==0:
C[i]=ini(n)
# Produce the successor the C[i]
else:
C[i]=swapa(n,C[i-1])
# Check to see if the successor is legal
# If not, call swapb
if okay(n,C[i])==False:
C[i]=swapb(n,C[i-1])
return C
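# Illustrative sketch (not part of the original source): for n = 2 customers
# the Catalan number is 2, so cases(2) is meant to enumerate the two legal
# arrival/departure orderings (1 = arrival, -1 = departure):
#
#   [ 1,  1, -1, -1]   # both customers arrive, then both depart
#   [ 1, -1,  1, -1]   # arrive, depart, arrive, depart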
def caseprob(n,P,meanX,meanY):
"""
Procedure Name: caseprob
Purpose: Computes the probability associated with a given row of
the case matrix C as represented by the path created
by path(n,A)
Input: 1. n: the total number of customers in the system
2. P: the path of a given case
3. meanX: the mean of the arrival distribution
4. meanY: the mean of the service time distribution
Output: 1. p: the probability of the case passed to the procedure
"""
p=1
row=n
col=1
for j in range(2*n-1):
if P[row-1][col]==1 and col<n:
row-=1
p=p*1/meanY/(1/meanX+1/meanY)
elif P[row-1][col]==1 and col==n:
row-=1
elif P[row][col+1]==1 and row+col>n+1:
col+=1
p=p*1/meanX/(1/meanX+1/meanY)
else:
col+=1
return p
def Cprime(n,C):
"""
Procedure Name: Cprime
Purpose: Produces a matrix C' that is the distribution segment
matrix where each row represents the distribution
segments for the case represented by the corresponding
row in the case matrix C. The elements of C' are
limited to a 0, 1 and 2. 0 implies no sojourn time
distribution segment due to an empyting of the system.
1 implies a copeting risk of an arrival or completion of
service and is distributed Exp(theta+mu) and 2 implies a
service completion distribution leg, which is distributed
Exp(mu).
Input: 1. n: the total number of customers in the system
2. C: the case matrix
Output: 1. prime: a matrix with the same number of rows as C and
2n-1 columns
"""
prime=np.zeros((np.size(C,1),2*n-1))
for i in range(np.size(C,1)):
row=n
col=1
pat=path(n,C[i])
dist=np.zeros((1,2*n-1))
for j in range(2*n-1):
if pat[row-1][col]==1 and col<n:
row-=1
dist[0][j]=1
elif pat[row-1][col]==1 and col==n:
row-=1
dist[0][j]=2
elif pat[row-1][col]==1 and row+col>n+1:
col+=1
dist[0][j]=1
else:
col+=1
dist[0][j]=0
prime[i]=dist
return prime
def ini(n):
"""
Procedure Name: ini
Purpose: Initializes a matrix C according to Ruskey and Williams
Returns the first row of C to enable use of prefix
shift algorithm.
Arguments: 1. n: the total number of customers in the system
Output: 1. L: a row vector, the first row of C
"""
L=-np.ones(2*n)
L[0]=1
for i in range(2,n+1):
L[i]=1
for i in range(n+1,2*n):
L[i]=-1
return L
def kcases(n,k):
"""
Procedure Name: kcases
Purpose: Generates all possible arrival/departure sequences for n
customers in an M/M/1 queue with k customers initially
present.
Arguments: 1. n: the total number of customers in the system
2. k: the number of customers initially present in the
system.
Output: 1. C: a list of sequences consiting of 1's and -1s where
1 represents an arrival and -1 represents a
departure.
"""
C=cases(n+k)
j=0
while j<np.size(C,1):
# if the sum of row j of C from column 1 to k != k
if np.sum(C[j,0:k])!=k:
# delete the jth row of C
C=np.delete(C,j,0)
else:
j+=1
# delete column 1st through kth column of C
C=np.delete(C,np.s_[0:k],1)
return C
def kcaseprob(n,k,P,meanX,meanY):
"""
    Procedure Name: kcaseprob
Purpose: Computes the probability associated with a given row of
the case matrix C as represented by the path created
by kpath(n,k,A)
Input: 1. n: the total number of customers in the system
2. k: the total number of customers initially present
in the system
3. P: the path of a given case
           4. meanX: the mean of the arrival distribution
5. meanY: the mean of the service time distribution
Output: 1. p: the probability of the case passed to the procedure
"""
p=1
row=n+k
col=0
for j in range(2*n+k):
if P[row-1][col]==1 and col<n:
row-=1
p=p*1/meanY/(1/meanX+1/meanY)
elif P[row-1][col]==1 and col==n:
row-=1
elif P[row][col+1]==1 and row+col>n+1:
col+=1
p=p*1/meanX/(1/meanX+1/meanY)
else:
col+=1
return p
def kCprime(n,k,C):
"""
    Procedure Name: kCprime
Purpose: Produces a matrix C' that is the distribution segment
matrix where each row represents the distribution
segments for the case represented by the corresponding
row in the case matrix C. The elements of C' are
limited to a 0, 1 and 2. 0 implies no sojourn time
                distribution segment due to an emptying of the system.
                1 implies a competing risk of an arrival or completion of
service and is distributed Exp(theta+mu) and 2 implies a
service completion distribution leg, which is distributed
Exp(mu).
Input: 1. n: the total number of customers in the system
2. k: the number of customers initially in the system
2. C: the case matrix
Output: 1. prime: a matrix with the same number of rows as C and
2*(n+1)+k columns
"""
prime=np.zeros((np.size(C,1),2*n+k))
for i in range(np.size(C,1)):
row=n+k
col=0
pat=kpath(n,k,C[i])
dist=np.zeros((1,2*n+k))
for j in range(2*n+k):
if pat[row-1][col]==1 and col<n:
row-=1
dist[0][j]=1
elif pat[row-1][col]==1 and col==n:
row-=1
dist[0][j]=2
elif pat[row-1][col+1]==1 and row+col>n+1:
col+=1
dist[0][j]=1
else:
col+=1
dist[0][j]=0
prime[i]=dist
return prime
def kpath(n,k,A):
"""
    Procedure Name: kpath
Purpose: Creates a path that starts at the lower left corner
of the matrix and moves to the upper right corner.
The first leg of the path is always the arrival of
customer 1. A 1 to the right of the previous 1
signifies an arrival, while a 1 above the previous
1 signifies a service completion.
    Arguments: 1. n: the total number of customers in the system
2. k: the number of customers initially present in
the system
3. A: A row from the case matrix C.
Output: 1. pat: A path matrix of size (n+k+1)x(n+1)
"""
row=n+k
col=0
pat=np.zeros((n+k+1,n+1))
pat[n+k][0]=1
for j in range(2*n+k):
if A[j]==1:
col+=1
pat[row][col]=1
else:
row-=1
pat[row][col]=1
return pat
def kprobvec(n,k,meanX,meanY):
"""
    Procedure Name: kprobvec
    Purpose: Uses the kcaseprob procedure to successively build a vector
                of probabilities, one for each case of the C matrix.
    Input:  1. n: the total number of customers in the system
            2. k: the number of customers initially in the system
            3. meanX: the mean of the arrival distribution
            4. meanY: the mean of the service time distribution
    Output: 1. p: a probability vector with one entry per row of kcases(n,k)
"""
C=kcases(n,k)
p=np.zeros(np.size(C,1))
for i in range(np.size(C,1)):
p[i]=kcaseprob(n,k,kpath(n,k,C[i]),meanX,meanY)
return p
def okay(n,E):
"""
Procedure Name: okay
Purpose: Checks the output of swapa for an illegal prefix shift,
meaning the result contains an impossible arrival/
service sequence.
Arguments: 1. n: the total number of customers in the system
2. E: the vector resulting from swapa
    Output: 1. test: a binary indicator where True signifies the
successor is legal and False signifies that the
successor is illegal
"""
test=True
s=0
for i in range(2*n-1):
s+=E[i]
if s<0:
test=False
break
return test
def path(n,A):
"""
Procedure Name: path
Purpose: Creates a path that starts at the lower left corner
of the matrix and moves to the upper right corner.
The first leg of the path is always the arrival of
customer 1. A 1 to the right of the previous 1 signifies
an arrival, while a 1 above the previous 1 signifies
a service completion.
Arguments: 1. n: the total number of customers in the system
2. A: a row from the case matrix C.
Output: 1. pat: A path matrix of size (n+1)x(n+1)
"""
row=n
col=1
pat=np.zeros((n+1,n+1))
pat[n,0]=1
pat[n,1]=1
for j in range(1,2*n):
if A[j]==1:
col+=1
pat[row,col]=1
else:
row-=1
pat[row,col]=1
return pat
def probvec(n,meanX,meanY):
"""
Procedure Name: probvec
    Purpose: Uses the caseprob procedure to successively build a vector
of probabilities, one for each case of the C matrix.
Input: 1. n: the total number of customers in the system
2. meanX: the mean of the arrival distribution
3. meanY: the mean of the service time distribution
Output: 1. p: a probability vector of length 2n!/n!/(n+1)!
"""
c=factorial(2*n)/factorial(n)/factorial(n+1)
p=np.zeros(c)
for i in range(c):
p[i]=caseprob(n,path(n,cases(n)[i]),meanX,meanY)
return p
def swapa(n,A):
"""
Procedure Name: swapa
Purpose: Conducts the (k+1)st prefix shift in creating all
instances of the case matrix, C, according
to Ruskey and Williams
Arguments: 1. n: the total number of customers in the system
2. A: row i of matrix C
Output: 1. R: the successor of C
"""
R=A
check=1
for i in range(1,2*n-1):
if R[i]==-1 and R[i+1]==1:
temp1=R[i+2]
R[2:(i+2)]=R[1:(i+1)]
check=0
R[1]=temp1
if check==0:
break
return R
def swapb(n,B):
"""
Procedure Name: swapb
Purpose: Conducts the kth prefix shift in creating all
                instances of the case matrix, C, according
to Ruskey and Williams
Arguments: 1. n: the number of customers in the system
2. B: row i of matrix C
Output: 1. R: the successor of C
"""
R=B
check=1
for i in range(1,2*n-2):
if R[i]==-1 and R[i+1]==1:
temp=R[i+1]
R[2:(i+1)]=R[1:i]
check=0
R[1]=temp
if check==0:
break
return R | APPLPy | /APPLPy-0.4.12.tar.gz/APPLPy-0.4.12/applpy/queue_dist.py | queue_dist.py |
from __future__ import division
from matplotlib.pylab import (plot, xlabel, ylabel, title, grid, arange,
ion, ioff)
from sympy import (symbols)
x,y,z,t,v=symbols('x y z t v')
"""
A Probability Programming Language (APPL) -- Python Edition
Copyright (C) 2001,2002,2008,2010,2014 Andrew Glen, Larry
Leemis, Diane Evans, Matthew Robinson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
"""
Plotting Module
Defines procedures for plotting random variables
"""
def mat_plot(funclist,suplist,lab1=None,lab2=None,ftype='continuous'):
"""
Procedure Name: mat_plot
Purpose: Create a matplotlib plot of a random variable
    Arguments: 1. funclist: the functional forms of the random variable
               2. suplist: the support of the plot
Output: 1. A plot of the random variable
"""
# if the random variable is continuous, plot the function
if ftype=='continuous':
for i in range(len(funclist)):
if funclist[i]=='0':
continue
if 'x' in funclist[i]:
x=arange(suplist[i],suplist[i+1],0.01)
s=eval(funclist[i])
plot(x,s,linewidth=1.0,color='green')
else:
plot([suplist[i],suplist[i+1]],
[funclist[i],funclist[i]],
linewidth=1.0,color='green')
if lab1=='idf':
xlabel('s')
else:
xlabel('x')
if lab1!=None:
ylabel(lab1)
if lab2!=None:
title(lab2)
grid(True)
# If the random variable is discrete, plot the function
if ftype=='discrete':
plot(suplist,funclist,'ro')
if lab1=='F-1(s)':
xlabel('s')
else:
xlabel('x')
if lab1!=None:
ylabel(lab1)
if lab2!=None:
title(lab2)
grid(True)
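# Illustrative usage sketch (not part of the original source); the function
# string and support below are hypothetical:
#
#   mat_plot(['2*x'], [0, 1], lab1='f(x)', lab2='PDF of f(x)=2x on [0,1]')
#
# Each string in funclist is evaluated with eval() over an arange of the
# corresponding interval in suplist, so suplist needs one more entry than
# funclist.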
def prob_plot(Sample,Fitted,plot_type):
"""
Procedure Name: prob_plot
Purpose: Create a mat plot lib plot to compare sample distributions
with theoretical models
Arguments: 1. Sample: Data sample quantiles
               2. Fitted: Model quantiles
Output: 1. A probability plot that compares data with a model
"""
plot(Fitted,Sample,'ro')
x=arange(min(min(Sample),min(Fitted)),
max(max(Sample),max(Fitted)),0.01)
s=x
plot(x,s,linewidth=1.0,color='red')
if plot_type=='QQ Plot':
xlabel('Model Quantiles')
ylabel('Sample Quantiles')
elif plot_type=='PP Plot':
xlabel('Model CDF')
ylabel('Sample CDF')
title(plot_type)
grid(True) | APPLPy | /APPLPy-0.4.12.tar.gz/APPLPy-0.4.12/applpy/appl_plot.py | appl_plot.py |
# APS_BlueSky_tools
Various Python tools for use with BlueSky at the APS
[](https://pypi.python.org/pypi/APS_BlueSky_tools)
[](http://aps-bluesky-tools.readthedocs.io/en/latest/?badge=latest)
[](https://github.com/BCDA-APS/APS_BlueSky_tools/tags)
[](https://github.com/BCDA-APS/APS_BlueSky_tools/releases)
[](https://pypi.python.org/pypi/APS_BlueSky_tools)
[](https://anaconda.org/prjemian/APS_BlueSky_tools)
[](https://travis-ci.org/BCDA-APS/APS_BlueSky_tools)
[](https://coveralls.io/github/BCDA-APS/APS_BlueSky_tools?branch=master)
[](https://landscape.io/github/BCDA-APS/APS_BlueSky_tools/master)
* http://nsls-ii.github.io/
* https://github.com/NSLS-II/bluesky
## Package Information
item | description
------------------|--------------------------------
**author** | Pete R. Jemian
**email** | [email protected]
**copyright** | 2017-2019, Pete R. Jemian
[**license**](APS_BlueSky_tools/LICENSE) | ANL OPEN SOURCE LICENSE
[**documentation**](https://APS_BlueSky_tools.readthedocs.io) | https://APS_BlueSky_tools.readthedocs.io
**source** | https://github.com/BCDA-APS/APS_BlueSky_tools
| APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/README.md | README.md |
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
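# For illustration, a minimal setup.cfg section of the kind that
# get_config_from_root() expects (the same sample appears below in
# CONFIG_ERROR / SAMPLE_CONFIG):
#
#   [versioneer]
#   VCS = git
#   style = pep440
#   versionfile_source = src/myproject/_version.py
#   versionfile_build = myproject/_version.py
#   tag_prefix =
#   parentdir_prefix = myproject-
#
# Only VCS is mandatory; style falls back to "" and the other keys to None.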
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
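# For illustration: register_vcs_handler() fills the HANDLERS registry keyed
# by VCS name and method name, so the decorated functions below become
#   HANDLERS["git"]["get_keywords"]     -> git_get_keywords
#   HANDLERS["git"]["keywords"]         -> git_versions_from_keywords
#   HANDLERS["git"]["pieces_from_vcs"]  -> git_pieces_from_vcs
# and get_versions() later looks them up via HANDLERS.get(cfg.VCS).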
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
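# For illustration, a minimal sketch of how run_command() is used later in
# this module; it returns (stdout, returncode), or (None, None) when none of
# the candidate executables can be found:
#
#   out, rc = run_command(["git"], ["rev-parse", "HEAD"], cwd=root,
#                         hide_stderr=True)
#   if out is None or rc != 0:
#       ...  # treat as "this method is not applicable", try another source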
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
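# For illustration: given pieces such as
#   {"closest-tag": "1.2", "distance": 4, "short": "abc1234", "dirty": True}
# render_pep440() yields "1.2+4.gabc1234.dirty"; with "closest-tag" set to
# None it falls back to "0+untagged.4.gabc1234.dirty", per the docstring above.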
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1) | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/versioneer.py | versioneer.py |
import argparse
from collections import OrderedDict
import sys
import time
from APS_BlueSky_tools import utils as APS_utils
from APS_BlueSky_tools import plans as APS_plans
from APS_BlueSky_tools import callbacks as APS_callbacks
BROKER_CONFIG = "mongodb_config"
def get_args():
"""
get command line arguments
"""
from .__init__ import __version__
doc = __doc__.strip().splitlines()[0].strip()
doc += f" version={__version__}"
parser = argparse.ArgumentParser(description=doc)
parser.add_argument('EPICS_PV', action='store', nargs='+',
help="EPICS PV name", default="")
# optional arguments
text = "YAML configuration for databroker"
text += f", default: {BROKER_CONFIG}"
parser.add_argument('-b', action='store', dest='broker_config',
help=text,
default=BROKER_CONFIG)
text = """
additional metadata, enclose in quotes,
such as -m "purpose=just tuned, situation=routine"
"""
parser.add_argument('-m', '--metadata', action='store',
dest='metadata_spec', help=text, default="")
parser.add_argument('-r', '--report', action='store_false',
dest='report',
help="suppress snapshot report",
default=True)
parser.add_argument('-v', '--version',
action='version', version=__version__)
return parser.parse_args()
def parse_metadata(args):
md = OrderedDict()
if len(args.metadata_spec.strip()) > 0:
for metadata in args.metadata_spec.split(","):
parts = metadata.strip().split("=")
if len(parts) == 2:
md[parts[0].strip()] = parts[1].strip()
else:
msg = f"incorrect metadata specification {metadata}"
msg += ", must specify key = value [, key2 = value2 ]"
raise ValueError(msg)
return md
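# For illustration, using the example from the -m help text: with
#   args.metadata_spec = "purpose=just tuned, situation=routine"
# parse_metadata(args) returns
#   OrderedDict([('purpose', 'just tuned'), ('situation', 'routine')])
# and raises ValueError for any entry that is not a single "key = value" pair.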
def snapshot_cli():
"""
given a list of PVs on the command line, snapshot and print report
EXAMPLES::
snapshot.py pv1 [more pvs ...]
snapshot.py `cat pvlist.txt`
Note that these are equivalent::
snapshot.py rpi5bf5:0:humidity rpi5bf5:0:temperature
snapshot.py rpi5bf5:0:{humidity,temperature}
"""
from databroker import Broker
from bluesky import RunEngine
args = get_args()
md = OrderedDict(purpose="archive a set of EPICS PVs")
md.update(parse_metadata(args))
obj_dict = APS_utils.connect_pvlist(args.EPICS_PV, wait=False)
time.sleep(2) # FIXME: allow time to connect
db = Broker.named(args.broker_config)
RE = RunEngine({})
RE.subscribe(db.insert)
uuid_list = RE(APS_plans.snapshot(obj_dict.values(), md=md))
if args.report:
snap = list(db(uuid_list[0]))[0]
APS_callbacks.SnapshotReport().print_report(snap)
if __name__ == "__main__":
snapshot_cli() | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/APS_BlueSky_tools/snapshot.py | snapshot.py |
from collections import OrderedDict
from datetime import datetime
import epics
import itertools
import numpy as np
import threading
import time
from .synApps_ophyd import *
from . import plans as APS_plans
import ophyd
from ophyd import Component, Device, DeviceStatus, FormattedComponent
from ophyd import Signal, EpicsMotor, EpicsSignal, EpicsSignalRO
from ophyd.scaler import EpicsScaler, ScalerCH
from ophyd.positioner import PositionerBase
from ophyd.areadetector.filestore_mixins import FileStoreHDF5
from ophyd.areadetector.filestore_mixins import FileStoreBase
from ophyd.areadetector.filestore_mixins import FileStorePluginBase
from ophyd.areadetector.filestore_mixins import FileStoreIterativeWrite
from ophyd import HDF5Plugin
from ophyd.utils import set_and_wait
def use_EPICS_scaler_channels(scaler):
"""
configure scaler for only the channels with names assigned in EPICS
"""
if isinstance(scaler, EpicsScaler):
import epics
read_attrs = []
for ch in scaler.channels.component_names:
_nam = epics.caget("{}.NM{}".format(scaler.prefix, int(ch[4:])))
if len(_nam.strip()) > 0:
read_attrs.append(ch)
scaler.channels.read_attrs = read_attrs
elif isinstance(scaler, ScalerCH):
read_attrs = []
for ch in scaler.channels.component_names:
nm_pv = scaler.channels.__getattribute__(ch)
if nm_pv is not None and len(nm_pv.chname.value.strip()) > 0:
read_attrs.append(ch)
scaler.channels.read_attrs = read_attrs
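# For illustration, a minimal usage sketch ("xxx:scaler1" is a hypothetical
# IOC prefix; substitute the one actually in use):
#
#   scaler = ScalerCH("xxx:scaler1", name="scaler")
#   scaler.wait_for_connection()
#   use_EPICS_scaler_channels(scaler)  # read only the channels named in EPICS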
class ApsOperatorMessagesDevice(Device):
"""general messages from the APS main control room"""
operators = Component(EpicsSignalRO, "OPS:message1", string=True)
floor_coordinator = Component(EpicsSignalRO, "OPS:message2", string=True)
fill_pattern = Component(EpicsSignalRO, "OPS:message3", string=True)
last_problem_message = Component(EpicsSignalRO, "OPS:message4", string=True)
last_trip_message = Component(EpicsSignalRO, "OPS:message5", string=True)
# messages 6-8: meaning?
message6 = Component(EpicsSignalRO, "OPS:message6", string=True)
message7 = Component(EpicsSignalRO, "OPS:message7", string=True)
message8 = Component(EpicsSignalRO, "OPS:message8", string=True)
class ApsMachineParametersDevice(Device):
"""
common operational parameters of the APS of general interest
EXAMPLE::
import APS_BlueSky_tools.devices as APS_devices
APS = APS_devices.ApsMachineParametersDevice(name="APS")
aps_current = APS.current
# make sure these values are logged at start and stop of every scan
sd.baseline.append(APS)
# record storage ring current as secondary stream during scans
# name: aps_current_monitor
# db[-1].table("aps_current_monitor")
sd.monitors.append(aps_current)
The `sd.baseline` and `sd.monitors` usage relies on this global setup:
from bluesky import SupplementalData
sd = SupplementalData()
RE.preprocessors.append(sd)
.. autosummary::
~inUserOperations
"""
current = Component(EpicsSignalRO, "S:SRcurrentAI")
lifetime = Component(EpicsSignalRO, "S:SRlifeTimeHrsCC")
machine_status = Component(EpicsSignalRO, "S:DesiredMode", string=True)
# In [3]: APS.machine_status.enum_strs
# Out[3]:
# ('State Unknown',
# 'USER OPERATIONS',
# 'Bm Ln Studies',
# 'INJ Studies',
# 'ASD Studies',
# 'NO BEAM',
# 'MAINTENANCE')
operating_mode = Component(EpicsSignalRO, "S:ActualMode", string=True)
# In [4]: APS.operating_mode.enum_strs
# Out[4]:
# ('State Unknown',
# 'NO BEAM',
# 'Injecting',
# 'Stored Beam',
# 'Delivered Beam',
# 'MAINTENANCE')
shutter_permit = Component(EpicsSignalRO, "ACIS:ShutterPermit", string=True)
fill_number = Component(EpicsSignalRO, "S:FillNumber")
orbit_correction = Component(EpicsSignalRO, "S:OrbitCorrection:CC")
global_feedback = Component(EpicsSignalRO, "SRFB:GBL:LoopStatusBI", string=True)
global_feedback_h = Component(EpicsSignalRO, "SRFB:GBL:HLoopStatusBI", string=True)
global_feedback_v = Component(EpicsSignalRO, "SRFB:GBL:VLoopStatusBI", string=True)
operator_messages = Component(ApsOperatorMessagesDevice)
@property
def inUserOperations(self):
"""
determine if APS is in User Operations mode (boolean)
Use this property to configure ophyd Devices for direct or simulated hardware.
See issue #49 (https://github.com/BCDA-APS/APS_BlueSky_tools/issues/49) for details.
EXAMPLE::
APS = APS_BlueSky_tools.devices.ApsMachineParametersDevice(name="APS")
if APS.inUserOperations:
suspend_APS_current = bluesky.suspenders.SuspendFloor(APS.current, 2, resume_thresh=10)
RE.install_suspender(suspend_APS_current)
else:
# use pseudo shutter controls and no current suspenders
pass
"""
verdict = self.machine_status.value in (1, "USER OPERATIONS")
# verdict = verdict and self.operating_mode.value not in (5, "MAINTENANCE")
return verdict
class ApsPssShutter(Device):
"""
APS PSS shutter
* APS PSS shutters have separate bit PVs for open and close
* set either bit, the shutter moves, and the bit resets a short time later
* no indication that the shutter has actually moved from the bits
(see :func:`ApsPssShutterWithStatus()` for alternative)
EXAMPLE::
shutter_a = ApsPssShutter("2bma:A_shutter", name="shutter")
shutter_a.open()
shutter_a.close()
shutter_a.set("open")
shutter_a.set("close")
When using the shutter in a plan, be sure to use ``yield from``, such as::
def in_a_plan(shutter):
yield from abs_set(shutter, "open", wait=True)
# do something
yield from abs_set(shutter, "close", wait=True)
RE(in_a_plan(shutter_a))
The strings accepted by `set()` are defined in two lists:
`valid_open_values` and `valid_close_values`. These lists
are treated (internally to `set()`) as lower case strings.
Example, add "o" & "x" as aliases for "open" & "close":
shutter_a.valid_open_values.append("o")
shutter_a.valid_close_values.append("x")
shutter_a.set("o")
shutter_a.set("x")
"""
open_bit = Component(EpicsSignal, ":open")
close_bit = Component(EpicsSignal, ":close")
delay_s = 1.2
valid_open_values = ["open",] # lower-case strings ONLY
valid_close_values = ["close",]
busy = Signal(value=False, name="busy")
def open(self):
"""request shutter to open, interactive use"""
self.open_bit.put(1)
def close(self):
"""request shutter to close, interactive use"""
self.close_bit.put(1)
def set(self, value, **kwargs):
"""request the shutter to open or close, BlueSky plan use"""
# ensure numerical additions to lists are now strings
def input_filter(v):
return str(v).lower()
self.valid_open_values = list(map(input_filter, self.valid_open_values))
self.valid_close_values = list(map(input_filter, self.valid_close_values))
if self.busy.value:
raise RuntimeError("shutter is operating")
acceptables = self.valid_open_values + self.valid_close_values
if input_filter(value) not in acceptables:
msg = "value should be one of " + " | ".join(acceptables)
msg += " : received " + str(value)
raise ValueError(msg)
status = DeviceStatus(self)
def move_shutter():
if input_filter(value) in self.valid_open_values:
self.open() # no need to yield inside a thread
elif input_filter(value) in self.valid_close_values:
self.close()
def run_and_delay():
self.busy.put(True)
move_shutter()
# sleep, since we don't *know* when the shutter has moved
time.sleep(self.delay_s)
self.busy.put(False)
status._finished(success=True)
threading.Thread(target=run_and_delay, daemon=True).start()
return status
class ApsPssShutterWithStatus(Device):
"""
APS PSS shutter with separate status PV
* APS PSS shutters have separate bit PVs for open and close
* set either bit, the shutter moves, and the bit resets a short time later
* a separate status PV tells if the shutter is open or closed
(see :func:`ApsPssShutter()` for alternative)
EXAMPLE::
A_shutter = ApsPssShutterWithStatus(
"2bma:A_shutter",
"PA:02BM:STA_A_FES_OPEN_PL",
name="A_shutter")
B_shutter = ApsPssShutterWithStatus(
"2bma:B_shutter",
"PA:02BM:STA_B_SBS_OPEN_PL",
name="B_shutter")
A_shutter.open()
A_shutter.close()
or
%mov A_shutter "open"
%mov A_shutter "close"
or
A_shutter.set("open") # MUST be "open", not "Open"
A_shutter.set("close")
When using the shutter in a plan, be sure to use `yield from`.
def in_a_plan(shutter):
yield from abs_set(shutter, "open", wait=True)
# do something
yield from abs_set(shutter, "close", wait=True)
RE(in_a_plan(A_shutter))
The strings accepted by `set()` are defined in attributes
(`open_str` and `close_str`).
"""
open_bit = Component(EpicsSignal, ":open")
close_bit = Component(EpicsSignal, ":close")
pss_state = FormattedComponent(EpicsSignalRO, "{self.state_pv}")
# strings the user will use
open_str = 'open'
close_str = 'close'
# pss_state PV values from EPICS
open_val = 1
close_val = 0
def __init__(self, prefix, state_pv, *args, **kwargs):
self.state_pv = state_pv
super().__init__(prefix, *args, **kwargs)
def open(self, timeout=10):
" "
ophyd.status.wait(self.set(self.open_str), timeout=timeout)
def close(self, timeout=10):
" "
ophyd.status.wait(self.set(self.close_str), timeout=timeout)
def set(self, value, **kwargs):
# first, validate the input value
acceptables = (self.close_str, self.open_str)
if value not in acceptables:
msg = "value should be one of " + " | ".join(acceptables)
msg += " : received " + str(value)
raise ValueError(msg)
command_signal = {
self.open_str: self.open_bit,
self.close_str: self.close_bit
}[value]
expected_value = {
self.open_str: self.open_val,
self.close_str: self.close_val
}[value]
working_status = DeviceStatus(self)
def shutter_cb(value, timestamp, **kwargs):
# APS shutter state PVs do not define strings, use numbers
#value = enums[int(value)]
value = int(value)
if value == expected_value:
self.pss_state.clear_sub(shutter_cb)
working_status._finished()
self.pss_state.subscribe(shutter_cb)
command_signal.set(1)
return working_status
@property
def isOpen(self):
" "
return self.pss_state.value == self.open_val
@property
def isClosed(self):
" "
return self.pss_state.value == self.close_val
class SimulatedApsPssShutterWithStatus(Device):
"""
Simulated APS PSS shutter
EXAMPLE::
sim = SimulatedApsPssShutterWithStatus(name="sim")
"""
open_bit = Component(Signal)
close_bit = Component(Signal)
pss_state = FormattedComponent(Signal)
# strings the user will use
open_str = 'open'
close_str = 'close'
# pss_state PV values from EPICS
open_val = 1
close_val = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.open_bit.set(0)
self.close_bit.set(0)
self.pss_state.set(self.close_val)
def open(self, timeout=10):
"""request the shutter to open"""
self.set(self.open_str)
def close(self, timeout=10):
"""request the shutter to close"""
self.set(self.close_str)
def get_response_time(self):
"""simulated response time for PSS status"""
# return 0.5
return np.random.uniform(0.1, 0.9)
def set(self, value, **kwargs):
"""set the shutter to "close" or "open" """
# first, validate the input value
acceptables = (self.close_str, self.open_str)
if value not in acceptables:
msg = "value should be one of " + " | ".join(acceptables)
msg += " : received " + str(value)
raise ValueError(msg)
command_signal = {
self.open_str: self.open_bit,
self.close_str: self.close_bit
}[value]
expected_value = {
self.open_str: self.open_val,
self.close_str: self.close_val
}[value]
working_status = DeviceStatus(self)
simulate_delay = self.pss_state.value != expected_value
def shutter_cb(value, timestamp, **kwargs):
self.pss_state.clear_sub(shutter_cb)
if simulate_delay:
time.sleep(self.get_response_time())
self.pss_state.set(expected_value)
working_status._finished()
self.pss_state.subscribe(shutter_cb)
command_signal.put(1)
# finally, make sure both signals are reset
self.open_bit.put(0)
self.close_bit.put(0)
return working_status
@property
def isOpen(self):
"""is the shutter open?"""
if self.pss_state.value is None:
self.pss_state.set(self.close_val)
return self.pss_state.value == self.open_val
@property
def isClosed(self):
"""is the shutter closed?"""
if self.pss_state.value is None:
self.pss_state.set(self.close_val)
return self.pss_state.value == self.close_val
class ApsUndulator(Device):
"""
APS Undulator
EXAMPLE::
undulator = ApsUndulator("ID09ds:", name="undulator")
"""
energy = Component(EpicsSignal, "Energy", write_pv="EnergySet")
energy_taper = Component(EpicsSignal, "TaperEnergy", write_pv="TaperEnergySet")
gap = Component(EpicsSignal, "Gap", write_pv="GapSet")
gap_taper = Component(EpicsSignal, "TaperGap", write_pv="TaperGapSet")
start_button = Component(EpicsSignal, "Start")
stop_button = Component(EpicsSignal, "Stop")
harmonic_value = Component(EpicsSignal, "HarmonicValue")
gap_deadband = Component(EpicsSignal, "DeadbandGap")
device_limit = Component(EpicsSignal, "DeviceLimit")
access_mode = Component(EpicsSignalRO, "AccessSecurity")
device_status = Component(EpicsSignalRO, "Busy")
total_power = Component(EpicsSignalRO, "TotalPower")
message1 = Component(EpicsSignalRO, "Message1")
message2 = Component(EpicsSignalRO, "Message2")
message3 = Component(EpicsSignalRO, "Message3")
time_left = Component(EpicsSignalRO, "ShClosedTime")
device = Component(EpicsSignalRO, "Device")
location = Component(EpicsSignalRO, "Location")
version = Component(EpicsSignalRO, "Version")
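# A minimal usage sketch for ApsUndulator in a plan (not from the original
# source; the PV prefix, the RunEngine ``RE``, and the 9.0 keV target value
# are illustrative assumptions):
#
# from bluesky import plan_stubs as bps
# undulator = ApsUndulator("ID09ds:", name="undulator")
# RE(bps.mv(undulator.energy, 9.0)) # move the undulator energy setpoint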
class ApsUndulatorDual(Device):
"""
APS Undulator with upstream *and* downstream controls
EXAMPLE::
undulator = ApsUndulatorDual("ID09", name="undulator")
note:: the trailing ``:`` in the PV prefix should be omitted
"""
upstream = Component(ApsUndulator, "us:")
downstream = Component(ApsUndulator, "ds:")
class ApsBssUserInfoDevice(Device):
"""
provide current experiment info from the APS BSS
BSS: Beamtime Scheduling System
EXAMPLE::
bss_user_info = ApsBssUserInfoDevice(
"9id_bss:",
name="bss_user_info")
sd.baseline.append(bss_user_info)
"""
proposal_number = Component(EpicsSignal, "proposal_number")
activity = Component(EpicsSignal, "activity", string=True)
badge = Component(EpicsSignal, "badge", string=True)
bss_name = Component(EpicsSignal, "bss_name", string=True)
contact = Component(EpicsSignal, "contact", string=True)
email = Component(EpicsSignal, "email", string=True)
institution = Component(EpicsSignal, "institution", string=True)
station = Component(EpicsSignal, "station", string=True)
team_others = Component(EpicsSignal, "team_others", string=True)
time_begin = Component(EpicsSignal, "time_begin", string=True)
time_end = Component(EpicsSignal, "time_end", string=True)
timestamp = Component(EpicsSignal, "timestamp", string=True)
title = Component(EpicsSignal, "title", string=True)
# not yet updated, see: https://git.aps.anl.gov/jemian/aps_bss_user_info/issues/10
esaf = Component(EpicsSignal, "esaf", string=True)
esaf_contact = Component(EpicsSignal, "esaf_contact", string=True)
esaf_team = Component(EpicsSignal, "esaf_team", string=True)
class DeviceMixinBase(Device):
"""Base class for APS_Bluesky_tools Device mixin classes"""
class AxisTunerException(ValueError):
"""Exception during execution of `AxisTunerBase` subclass"""
class AxisTunerMixin(EpicsMotor):
"""
Mixin class to provide tuning capabilities for an axis
See the `TuneAxis()` example in this jupyter notebook:
https://github.com/BCDA-APS/APS_BlueSky_tools/blob/master/docs/source/resources/demo_tuneaxis.ipynb
HOOK METHODS
There are two hook methods (`pre_tune_method()`, and `post_tune_method()`)
for callers to add additional plan parts, such as opening or closing shutters,
setting detector parameters, or other actions.
Each hook method must accept a single argument:
an axis object such as `EpicsMotor` or `SynAxis`,
such as::
def my_pre_tune_hook(axis):
yield from bps.mv(shutter, "open")
def my_post_tune_hook(axis):
yield from bps.mv(shutter, "close")
class TunableSynAxis(AxisTunerMixin, SynAxis): pass
myaxis = TunableSynAxis(name="myaxis")
mydet = SynGauss('mydet', myaxis, 'myaxis', center=0.21, Imax=0.98e5, sigma=0.127)
myaxis.tuner = TuneAxis([mydet], myaxis)
myaxis.pre_tune_method = my_pre_tune_hook
myaxis.post_tune_method = my_post_tune_hook
RE(myaxis.tune())
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tuner = None # such as: APS_BlueSky_tools.plans.TuneAxis
# Hook functions for callers to add additional plan parts
# Each must accept one argument: axis object such as `EpicsMotor` or `SynAxis`
self.pre_tune_method = self._default_pre_tune_method
self.post_tune_method = self._default_post_tune_method
def _default_pre_tune_method(self):
"""called before `tune()`"""
print("{} position before tuning: {}".format(self.name, self.position))
def _default_post_tune_method(self):
"""called after `tune()`"""
print("{} position after tuning: {}".format(self.name, self.position))
def tune(self, md=None, **kwargs):
if self.tuner is None:
msg = "Must define an axis tuner, none specified."
msg += " Consider using APS_BlueSky_tools.plans.TuneAxis()"
raise AxisTunerException(msg)
if self.tuner.axis is None:
msg = "Must define an axis, none specified."
raise AxisTunerException(msg)
if md is None:
md = OrderedDict()
md["purpose"] = "tuner"
md["datetime"] = str(datetime.now())
if self.tuner is not None:
if self.pre_tune_method is not None:
self.pre_tune_method()
yield from self.tuner.tune(md=md, **kwargs)
if self.post_tune_method is not None:
self.post_tune_method()
# TODO: issue #76
# class TunableSynAxis(AxisTunerMixin, SynAxis): """synthetic axis that can be tuned"""
# class TunableEpicsMotor(AxisTunerMixin, EpicsMotor): """EpicsMotor that can be tuned"""
class EpicsDescriptionMixin(DeviceMixinBase):
"""
add a record's description field to a Device, such as EpicsMotor
EXAMPLE::
from ophyd import EpicsMotor
from APS_BlueSky_tools.devices import EpicsDescriptionMixin
class myEpicsMotor(EpicsDescriptionMixin, EpicsMotor): pass
m1 = myEpicsMotor('xxx:m1', name='m1')
print(m1.desc.value)
"""
desc = Component(EpicsSignal, ".DESC")
class EpicsMotorDialMixin(DeviceMixinBase):
"""
add motor record's dial coordinate fields to Device
EXAMPLE::
from ophyd import EpicsMotor
from APS_BlueSky_tools.devices import EpicsMotorDialMixin
class myEpicsMotor(EpicsMotorDialMixin, EpicsMotor): pass
m1 = myEpicsMotor('xxx:m1', name='m1')
print(m1.dial.read())
"""
dial = Component(EpicsSignal, ".DRBV", write_pv=".DVAL")
class EpicsMotorLimitsMixin(DeviceMixinBase):
"""
add motor record HLM & LLM fields & compatibility get_lim() and set_lim()
EXAMPLE::
from ophyd import EpicsMotor
from APS_BlueSky_tools.devices import EpicsMotorLimitsMixin
class myEpicsMotor(EpicsMotorLimitsMixin, EpicsMotor): pass
m1 = myEpicsMotor('xxx:m1', name='m1')
lo = m1.get_lim(-1)
hi = m1.get_lim(1)
m1.set_lim(-25, -5)
print(m1.get_lim(-1), m1.get_lim(1))
m1.set_lim(lo, hi)
"""
soft_limit_lo = Component(EpicsSignal, ".LLM")
soft_limit_hi = Component(EpicsSignal, ".HLM")
def get_lim(self, flag):
"""
Returns the user limit of motor
        * flag > 0: returns high limit
        * flag <= 0: returns low limit
        Similar to the SPEC command
"""
if flag > 0:
return self.soft_limit_hi.value
else:
return self.soft_limit_lo.value
def set_lim(self, low, high):
"""
Sets the low and high limits of motor
* No action taken if motor is moving.
* Low limit is set to lesser of (low, high)
* High limit is set to greater of (low, high)
        Similar to the SPEC command
"""
if not self.moving:
self.soft_limit_lo.put(min(low, high))
self.soft_limit_hi.put(max(low, high))
class EpicsMotorServoMixin(DeviceMixinBase):
"""
add motor record's servo loop controls to Device
EXAMPLE::
from ophyd import EpicsMotor
from APS_BlueSky_tools.devices import EpicsMotorServoMixin
class myEpicsMotor(EpicsMotorServoMixin, EpicsMotor): pass
m1 = myEpicsMotor('xxx:m1', name='m1')
print(m1.servo.read())
"""
# values: "Enable" or "Disable"
servo = Component(EpicsSignal, ".CNEN", string=True)
class EpicsMotorRawMixin(DeviceMixinBase):
"""
add motor record's raw coordinate fields to Device
EXAMPLE::
from ophyd import EpicsMotor
from APS_BlueSky_tools.devices import EpicsMotorRawMixin
class myEpicsMotor(EpicsMotorRawMixin, EpicsMotor): pass
m1 = myEpicsMotor('xxx:m1', name='m1')
print(m1.raw.read())
"""
raw = Component(EpicsSignal, ".RRBV", write_pv=".RVAL")
# TODO: issue #76
# class EpicsMotorWithDescription(EpicsDescriptionMixin, EpicsMotor):
# """EpicsMotor with description field"""
#
# class EpicsMotorWithMore(
# EpicsDescriptionMixin,
# EpicsMotorLimitsMixin,
# EpicsMotorDialMixin,
# EpicsMotorRawMixin,
# EpicsMotor):
# """
# EpicsMotor with more fields
#
# * description (``desc``)
# * soft motor limits (``soft_limit_hi``, ``soft_limit_lo``)
# * dial coordinates (``dial``)
# * raw coordinates (``raw``)
# """
class EpicsMotorShutter(Device):
"""
a shutter, implemented with an EPICS motor moved between two positions
EXAMPLE::
tomo_shutter = EpicsMotorShutter("2bma:m23", name="tomo_shutter")
tomo_shutter.closed_position = 1.0 # default
tomo_shutter.open_position = 0.0 # default
tomo_shutter.open()
tomo_shutter.close()
# or, when used in a plan
def planA():
yield from abs_set(tomo_shutter, "open", group="O")
yield from wait("O")
yield from abs_set(tomo_shutter, "close", group="X")
yield from wait("X")
def planA():
yield from abs_set(tomo_shutter, "open", wait=True)
yield from abs_set(tomo_shutter, "close", wait=True)
def planA():
yield from mv(tomo_shutter, "open")
yield from mv(tomo_shutter, "close")
"""
motor = Component(EpicsMotor, "")
closed_position = 1.0
open_position = 0.0
_tolerance = 0.01
@property
def isOpen(self):
" "
return abs(self.motor.position - self.open_position) <= self._tolerance
@property
def isClosed(self):
" "
return abs(self.motor.position - self.closed_position) <= self._tolerance
def open(self):
"""move motor to BEAM NOT BLOCKED position, interactive use"""
self.motor.move(self.open_position)
def close(self):
"""move motor to BEAM BLOCKED position, interactive use"""
self.motor.move(self.closed_position)
def set(self, value, *, timeout=None, settle_time=None):
"""
`set()` is like `put()`, but used in BlueSky plans
PARAMETERS
value : "open" or "close"
timeout : float, optional
Maximum time to wait. Note that set_and_wait does not support
an infinite timeout.
settle_time: float, optional
Delay after the set() has completed to indicate completion
to the caller
RETURNS
status : DeviceStatus
"""
# using put completion:
# timeout and settle time is handled by the status object.
status = DeviceStatus(
self, timeout=timeout, settle_time=settle_time)
def put_callback(**kwargs):
status._finished(success=True)
if value.lower() == "open":
pos = self.open_position
elif value.lower() == "close":
pos = self.closed_position
else:
msg = "value should be either open or close"
msg + " : received " + str(value)
raise ValueError(msg)
self.motor.user_setpoint.put(
pos, use_complete=True, callback=put_callback)
return status
class EpicsOnOffShutter(Device):
"""
a shutter, implemented with an EPICS PV moved between two positions
Use for a shutter controlled by a single PV which takes a
value for the close command and a different value for the open command.
The current position is determined by comparing the value of the control
with the expected open and close values.
EXAMPLE::
bit_shutter = EpicsOnOffShutter("2bma:bit1", name="bit_shutter")
bit_shutter.closed_position = 0 # default
bit_shutter.open_position = 1 # default
bit_shutter.open()
bit_shutter.close()
# or, when used in a plan
def planA():
yield from mv(bit_shutter, "open")
yield from mv(bit_shutter, "close")
"""
control = Component(EpicsSignal, "")
closed_position = 0
open_position = 1
@property
def isOpen(self):
" "
return self.control.value == self.open_position
@property
def isClosed(self):
" "
return self.control.value == self.closed_position
def open(self):
"""move control to BEAM NOT BLOCKED position, interactive use"""
self.control.put(self.open_position)
def close(self):
"""move control to BEAM BLOCKED position, interactive use"""
self.control.put(self.closed_position)
def set(self, value, *, timeout=None, settle_time=None):
"""
`set()` is like `put()`, but used in BlueSky plans
PARAMETERS
value : "open" or "close"
timeout : float, optional
Maximum time to wait. Note that set_and_wait does not support
an infinite timeout.
settle_time: float, optional
Delay after the set() has completed to indicate completion
to the caller
RETURNS
status : DeviceStatus
"""
# using put completion:
# timeout and settle time is handled by the status object.
status = DeviceStatus(
self, timeout=timeout, settle_time=settle_time)
def put_callback(**kwargs):
status._finished(success=True)
if value.lower() == "open":
pos = self.open_position
elif value.lower() == "close":
pos = self.closed_position
else:
msg = "value should be either open or close"
msg + " : received " + str(value)
raise ValueError(msg)
self.control.put(
pos, use_complete=True, callback=put_callback)
return status
class DualPf4FilterBox(Device):
"""
Dual Xia PF4 filter boxes using support from synApps (using Al, Ti foils)
EXAMPLE::
pf4 = DualPf4FilterBox("2bmb:pf4:", name="pf4")
pf4_AlTi = DualPf4FilterBox("9idcRIO:pf4:", name="pf4_AlTi")
"""
fPosA = Component(EpicsSignal, "fPosA")
fPosB = Component(EpicsSignal, "fPosB")
bankA = Component(EpicsSignalRO, "bankA")
bankB = Component(EpicsSignalRO, "bankB")
bitFlagA = Component(EpicsSignalRO, "bitFlagA")
bitFlagB = Component(EpicsSignalRO, "bitFlagB")
transmission = Component(EpicsSignalRO, "trans")
transmission_a = Component(EpicsSignalRO, "transA")
transmission_b = Component(EpicsSignalRO, "transB")
inverse_transmission = Component(EpicsSignalRO, "invTrans")
thickness_Al_mm = Component(EpicsSignalRO, "filterAl")
thickness_Ti_mm = Component(EpicsSignalRO, "filterTi")
thickness_glass_mm = Component(EpicsSignalRO, "filterGlass")
energy_keV_local = Component(EpicsSignal, "E:local")
energy_keV_mono = Component(EpicsSignal, "displayEnergy")
mode = Component(EpicsSignal, "useMono", string=True)
class ProcedureRegistry(Device):
"""
Procedure Registry: run a blocking function in a thread
With many instruments, such as USAXS, there are several operating
modes to be used, each with its own setup code. This ophyd Device
should coordinate those modes so that the setup procedures can be called
either as part of a Bluesky plan or from the command line directly.
Assumes that users will write functions to setup a particular
operation or operating mode. The user-written functions may not
be appropriate to use in a plan directly since they might
make blocking calls. The ProcedureRegistry will call the function
in a thread (which is allowed to make blocking calls) and wait
for the thread to complete.
It is assumed that each user-written function will not return until
it is complete.
.. autosummary::
~dir
~add
~remove
~set
~put
EXAMPLE:
Given these function definitions::
def clearScalerNames():
for ch in scaler.channels.configuration_attrs:
if ch.find(".") < 0:
chan = scaler.channels.__getattribute__(ch)
chan.chname.put("")
def setMyScalerNames():
scaler.channels.chan01.chname.put("clock")
scaler.channels.chan02.chname.put("I0")
scaler.channels.chan03.chname.put("detector")
create a registry and add the two functions (default name
    is the function name)::
use_mode = ProcedureRegistry(name="ProcedureRegistry")
use_mode.add(clearScalerNames)
use_mode.add(setMyScalerNames)
and then use this registry in a plan, such as this::
def myPlan():
yield from bps.mv(use_mode, "setMyScalerNames")
yield from bps.sleep(5)
yield from bps.mv(use_mode, "clearScalerNames")
"""
busy = Component(Signal, value=False)
registry = {}
delay_s = 0
timeout_s = None
state = "__created__"
@property
def dir(self):
"""tuple of procedure names"""
return tuple(sorted(self.registry.keys()))
def add(self, procedure, proc_name=None):
"""
add procedure to registry
"""
#if procedure.__class__ == "function":
nm = proc_name or procedure.__name__
self.registry[nm] = procedure
def remove(self, procedure):
"""
remove procedure from registry
"""
if isinstance(procedure, str):
nm = procedure
else:
nm = procedure.__name__
if nm in self.registry:
del self.registry[nm]
def set(self, proc_name):
"""
run procedure in a thread, return once it is complete
proc_name (str) : name of registered procedure to be run
"""
if not isinstance(proc_name, str):
raise ValueError("expected a procedure name, not {}".format(proc_name))
if proc_name not in self.registry:
raise KeyError("unknown procedure name: "+proc_name)
if self.busy.value:
raise RuntimeError("busy now")
self.state = "__busy__"
status = DeviceStatus(self)
@APS_plans.run_in_thread
def run_and_delay():
self.busy.put(True)
self.registry[proc_name]()
# optional delay
if self.delay_s > 0:
time.sleep(self.delay_s)
self.busy.put(False)
status._finished(success=True)
run_and_delay()
ophyd.status.wait(status, timeout=self.timeout_s)
self.state = proc_name
return status
def put(self, value): # TODO: risky?
"""replaces ophyd Device default put() behavior"""
self.set(value)
# AreaDetector support
AD_FrameType_schemes = {
"reset" : dict( # default names from Area Detector code
ZRST = "Normal",
ONST = "Background",
TWST = "FlatField",
),
"NeXus" : dict( # NeXus (typical locations)
ZRST = "/entry/data/data",
ONST = "/entry/data/dark",
TWST = "/entry/data/white",
),
"DataExchange" : dict( # APS Data Exchange
ZRST = "/exchange/data",
ONST = "/exchange/data_dark",
TWST = "/exchange/data_white",
),
}
def AD_setup_FrameType(prefix, scheme="NeXus"):
"""
configure so frames are identified & handled by type (dark, white, or image)
PARAMETERS
prefix (str) : EPICS PV prefix of area detector, such as "13SIM1:"
scheme (str) : any key in the `AD_FrameType_schemes` dictionary
This routine prepares the EPICS Area Detector to identify frames
by image type for handling by clients, such as the HDF5 file writing plugin.
With the HDF5 plugin, the `FrameType` PV is added to the NDattributes
and then used in the layout file to direct the acquired frame to
the chosen dataset. The `FrameType` PV value provides the HDF5 address
to be used.
To use a different scheme than the defaults, add a new key to
the `AD_FrameType_schemes` dictionary, defining storage values for the
fields of the EPICS `mbbo` record that you will be using.
see: https://github.com/BCDA-APS/use_bluesky/blob/master/notebooks/images_darks_flats.ipynb
EXAMPLE::
AD_setup_FrameType("2bmbPG3:", scheme="DataExchange")
* Call this function *before* creating the ophyd area detector object
* use lower-level PyEpics interface
"""
db = AD_FrameType_schemes.get(scheme)
if db is None:
msg = "unknown AD_FrameType_schemes scheme: {}".format(scheme)
msg += "\n Should be one of: " + ", ".join(AD_FrameType_schemes.keys())
raise ValueError(msg)
template = "{}cam1:FrameType{}.{}"
for field, value in db.items():
epics.caput(template.format(prefix, "", field), value)
epics.caput(template.format(prefix, "_RBV", field), value)
def AD_warmed_up(detector):
"""
Has area detector pushed an NDarray to the HDF5 plugin? True or False
Works around an observed issue: #598
https://github.com/NSLS-II/ophyd/issues/598#issuecomment-414311372
If detector IOC has just been started and has not yet taken an image
with the HDF5 plugin, then a TimeoutError will occur as the
HDF5 plugin "Capture" is set to 1 (Start). In such case,
first acquire at least one image with the HDF5 plugin enabled.
"""
old_capture = detector.hdf1.capture.value
old_file_write_mode = detector.hdf1.file_write_mode.value
if old_capture == 1:
return True
detector.hdf1.file_write_mode.put(1)
detector.hdf1.capture.put(1)
verdict = detector.hdf1.capture.get() == 1
detector.hdf1.capture.put(old_capture)
detector.hdf1.file_write_mode.put(old_file_write_mode)
return verdict
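# A minimal usage sketch (the ``simdet`` area detector object is an assumption,
# built with an ``hdf1`` HDF5 plugin as in the class example below):
#
# if not AD_warmed_up(simdet):
#     print("acquire at least one frame with the HDF5 plugin enabled first")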
class AD_EpicsHdf5FileName(FileStorePluginBase):
"""
custom class to define image file name from EPICS
.. caution:: *Caveat emptor* applies here. You assume expertise!
Replace standard Bluesky algorithm where file names
are defined as UUID strings, virtually guaranteeing that
no existing images files will ever be overwritten.
Also, this method decouples the data files from the databroker,
which needs the files to be named by UUID.
.. autosummary::
~make_filename
~generate_datum
~get_frames_per_point
~stage
To allow users to control the file **name**,
we override the ``make_filename()`` method here
and we need to override some intervening classes.
To allow users to control the file **number**,
we override the ``stage()`` method here
and triple-comment out that line, and bring in
sections from the methods we are replacing here.
The image file name is set in `FileStoreBase.make_filename()`
from `ophyd.areadetector.filestore_mixins`. This is called
(during device staging) from `FileStoreBase.stage()`
EXAMPLE:
To use this custom class, we need to connect it to some
intervening structure. Here are the steps:
#. override default file naming
#. use to make your custom iterative writer
#. use to make your custom HDF5 plugin
#. use to make your custom AD support
imports::
from bluesky import RunEngine, plans as bp
from ophyd.areadetector import SimDetector, SingleTrigger
from ophyd.areadetector import ADComponent, ImagePlugin, SimDetectorCam
from ophyd.areadetector import HDF5Plugin
from ophyd.areadetector.filestore_mixins import FileStoreIterativeWrite
override default file naming::
from APS_BlueSky_tools.devices import AD_EpicsHdf5FileName
make a custom iterative writer::
class myHdf5EpicsIterativeWriter(AD_EpicsHdf5FileName, FileStoreIterativeWrite): pass
make a custom HDF5 plugin::
class myHDF5FileNames(HDF5Plugin, myHdf5EpicsIterativeWriter): pass
define support for the detector (simulated detector here)::
class MySimDetector(SingleTrigger, SimDetector):
'''SimDetector with HDF5 file names specified by EPICS'''
cam = ADComponent(SimDetectorCam, "cam1:")
image = ADComponent(ImagePlugin, "image1:")
hdf1 = ADComponent(
myHDF5FileNames,
suffix = "HDF1:",
root = "/",
write_path_template = "/",
)
create an instance of the detector::
simdet = MySimDetector("13SIM1:", name="simdet")
        if "array_counter" in simdet.hdf1.stage_sigs:
# remove this so array counter is not set to zero each staging
del simdet.hdf1.stage_sigs["array_counter"]
simdet.hdf1.stage_sigs["file_template"] = '%s%s_%3.3d.h5'
setup the file names using the EPICS HDF5 plugin::
simdet.hdf1.file_path.put("/tmp/simdet_demo/") # ! ALWAYS end with a "/" !
simdet.hdf1.file_name.put("test")
simdet.hdf1.array_counter.put(0)
If you have not already, create a bluesky RunEngine::
RE = RunEngine({})
take an image::
RE(bp.count([simdet]))
INTERNAL METHODS
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.filestore_spec = 'AD_HDF5' # spec name stored in resource doc
self.stage_sigs.update([
('file_template', '%s%s_%4.4d.h5'),
('file_write_mode', 'Stream'),
('capture', 1)
])
def make_filename(self):
"""
overrides default behavior: Get info from EPICS HDF5 plugin.
"""
# start of the file name, file number will be appended per template
filename = self.file_name.value
# this is where the HDF5 plugin will write the image,
# relative to the IOC's filesystem
write_path = self.file_path.value
# this is where the DataBroker will find the image,
# on a filesystem accessible to BlueSky
read_path = write_path
return filename, read_path, write_path
def generate_datum(self, key, timestamp, datum_kwargs):
"""Generate a uid and cache it with its key for later insertion."""
template = self.file_template.get()
filename, read_path, write_path = self.make_filename()
file_number = self.file_number.get()
hdf5_file_name = template % (read_path, filename, file_number)
# inject the actual name of the HDF5 file here into datum_kwargs
datum_kwargs["HDF5_file_name"] = hdf5_file_name
# print("make_filename:", hdf5_file_name)
return super().generate_datum(key, timestamp, datum_kwargs)
def get_frames_per_point(self):
"""overrides default behavior"""
return self.num_capture.get()
def stage(self):
"""
overrides default behavior
Set EPICS items before device is staged, then copy EPICS
naming template (and other items) to ophyd after staging.
"""
# Make a filename.
filename, read_path, write_path = self.make_filename()
# Ensure we do not have an old file open.
set_and_wait(self.capture, 0)
# These must be set before parent is staged (specifically
# before capture mode is turned on. They will not be reset
# on 'unstage' anyway.
set_and_wait(self.file_path, write_path)
set_and_wait(self.file_name, filename)
### set_and_wait(self.file_number, 0)
# get file number now since it is incremented during stage()
file_number = self.file_number.get()
# Must avoid parent's stage() since it sets file_number to 0
# Want to call grandparent's stage()
#super().stage() # avoid this - sets `file_number` to zero
# call grandparent.stage()
FileStoreBase.stage(self)
# AD does the file name templating in C
# We can't access that result until after acquisition
# so we apply the same template here in Python.
template = self.file_template.get()
self._fn = template % (read_path, filename, file_number)
self._fp = read_path
if not self.file_path_exists.get():
raise IOError("Path {} does not exist on IOC.".format(
self.file_path.get()))
# from FileStoreIterativeWrite.stage()
self._point_counter = itertools.count()
# from FileStoreHDF5.stage()
res_kwargs = {'frame_per_point': self.get_frames_per_point()}
self._generate_resource(res_kwargs) | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/APS_BlueSky_tools/devices.py | devices.py |
# Copyright (c) 2017-2018, UChicago Argonne, LLC. See LICENSE file.
import databroker
import datetime
from .filewriters import SpecWriterCallback, _rebuild_scan_command
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
DEMO_SPEC_FILE = "test_specdata.txt"
def specfile_example(headers, filename=DEMO_SPEC_FILE):
"""write one or more headers (scans) to a SPEC data file"""
specwriter = SpecWriterCallback(filename=filename, auto_write=True)
if not isinstance(headers, (list, databroker._core.Results)):
headers = [headers]
for h in headers:
for key, doc in h.db.get_documents(h):
specwriter.receiver(key, doc)
lines = specwriter.prepare_scan_contents()
if lines is not None:
logger.info("\n".join(lines))
logger.info("#"*60)
logger.info("Look at SPEC data file: "+specwriter.spec_filename)
def plan_catalog(db):
"""
make a table of all scans known in the databroker
Example::
from APS_BlueSky_tools.examples import plan_catalog
plan_catalog(db)
"""
import pyRestTable
t = pyRestTable.Table()
t.labels = "date/time short_uid id plan args".split()
for h in db.hs.find_run_starts():
row = []
dt = datetime.datetime.fromtimestamp(h["time"])
row.append(str(dt).split(".")[0])
row.append(h['uid'][:8])
command = _rebuild_scan_command(h)
scan_id = command.split()[0]
command = command[len(scan_id):].strip()
plan = command.split("(")[0]
args = command[len(plan)+1:].rstrip(")")
row.append(scan_id)
row.append(plan)
row.append(args)
t.addRow(row)
t.rows = t.rows[::-1] # reverse the list
return t
def main():
"""
summary list of all scans in the databroker
``aps_bluesky_tools_plan_catalog`` command-line application
This can be unwieldy if there are many scans in the databroker.
Consider it as a demo program rather than for general, long-term use.
"""
from databroker import Broker
# load config from ~/.config/databroker/mongodb_config.yml
db = Broker.named("mongodb_config")
table = plan_catalog(db)
print(table)
print("Found {} plans (start documents)".format(len(table.rows)))
if __name__ == "__main__":
main()
# load config from ~/.config/databroker/mongodb_config.yml
# db = Broker.named("mongodb_config")
# plan_catalog(db)
# specfile_example(db[-1])
# specfile_example(db[-5:][::-1])
# specfile_example(db["1d2a3890"])
# specfile_example(db["15d12d"])
# specfile_example(db[-10:-5])
# specfile_example(db[-80])
# specfile_example(db[-10000:][-25:]) | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/APS_BlueSky_tools/examples.py | examples.py |
from collections import OrderedDict
import datetime
import logging
import numpy as np
import sys
import threading
import time
from bluesky import preprocessors as bpp
from bluesky import plan_stubs as bps
from bluesky import plans as bp
from bluesky.callbacks.fitting import PeakStats
import ophyd
from ophyd import Device, Component, Signal
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def run_in_thread(func):
"""
(decorator) run ``func`` in thread
USAGE::
@run_in_thread
def progress_reporting():
logger.debug("progress_reporting is starting")
# ...
#...
progress_reporting() # runs in separate thread
#...
"""
def wrapper(*args, **kwargs):
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
def run_blocker_in_plan(blocker, *args, _poll_s_=0.01, _timeout_s_=None, **kwargs):
"""
plan: run blocking function ``blocker_(*args, **kwargs)`` from a Bluesky plan
PARAMETERS
blocker : func
function object to be called in a Bluesky plan
_poll_s_ : float
sleep interval in loop while waiting for completion
(default: 0.01)
_timeout_s_ : float
maximum time for completion
(default: `None` which means no timeout)
Example: use ``time.sleep`` as blocking function::
RE(run_blocker_in_plan(time.sleep, 2.14))
Example: in a plan, use ``time.sleep`` as blocking function::
def my_sleep(t=1.0):
yield from run_blocker_in_plan(time.sleep, t)
RE(my_sleep())
"""
status = ophyd.status.Status()
@run_in_thread
def _internal(blocking_function, *args, **kwargs):
blocking_function(*args, **kwargs)
status._finished(success=True, done=True)
if _timeout_s_ is not None:
t_expire = time.time() + _timeout_s_
# FIXME: how to keep this from running during summarize_plan()?
_internal(blocker, *args, **kwargs)
while not status.done:
if _timeout_s_ is not None:
if time.time() > t_expire:
status._finished(success=False, done=True)
break
yield from bps.sleep(_poll_s_)
return status
def nscan(detectors, *motor_sets, num=11, per_step=None, md=None):
"""
Scan over ``n`` variables moved together, each in equally spaced steps.
PARAMETERS
detectors : list
list of 'readable' objects
motor_sets : list
sequence of one or more groups of: motor, start, finish
motor : object
any 'settable' object (motor, temp controller, etc.)
start : float
starting position of motor
finish : float
ending position of motor
num : int
number of steps (default = 11)
per_step : callable, optional
hook for customizing action of inner loop (messages per step)
Expected signature: ``f(detectors, step_cache, pos_cache)``
md : dict, optional
metadata
See the `nscan()` example in a Jupyter notebook:
https://github.com/BCDA-APS/APS_BlueSky_tools/blob/master/docs/source/resources/demo_nscan.ipynb
"""
# TODO: Isn't there a similar plan in bluesky? At least reference it.
def take_n_at_a_time(args, n=2):
yield from zip(*[iter(args)]*n)
if len(motor_sets) < 3:
raise ValueError("must provide at least one movable")
if len(motor_sets) % 3 > 0:
raise ValueError("must provide sets of movable, start, finish")
motors = OrderedDict()
for m, s, f in take_n_at_a_time(motor_sets, n=3):
if not isinstance(s, (int, float)):
msg = "start={} ({}): is not a number".format(s, type(s))
raise ValueError(msg)
if not isinstance(f, (int, float)):
msg = "finish={} ({}): is not a number".format(f, type(f))
raise ValueError(msg)
motors[m.name] = dict(motor=m, start=s, finish=f,
steps=np.linspace(start=s, stop=f, num=num))
_md = {'detectors': [det.name for det in detectors],
'motors': [m for m in motors.keys()],
'num_points': num,
'num_intervals': num - 1,
'plan_args': {'detectors': list(map(repr, detectors)),
'num': num,
'motors': repr(motor_sets),
'per_step': repr(per_step)},
'plan_name': 'nscan',
'plan_pattern': 'linspace',
'hints': {},
'iso8601': datetime.datetime.now(),
}
_md.update(md or {})
try:
m = list(motors.keys())[0]
dimensions = [(motors[m]["motor"].hints['fields'], 'primary')]
except (AttributeError, KeyError):
pass
else:
_md['hints'].setdefault('dimensions', dimensions)
if per_step is None:
per_step = bps.one_nd_step
@bpp.stage_decorator(list(detectors)
+ [m["motor"] for m in motors.values()])
@bpp.run_decorator(md=_md)
def inner_scan():
for step in range(num):
step_cache, pos_cache = {}, {}
for m in motors.values():
next_pos = m["steps"][step]
m = m["motor"]
pos_cache[m] = m.read()[m.name]["value"]
step_cache[m] = next_pos
yield from per_step(detectors, step_cache, pos_cache)
return (yield from inner_scan())
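# A minimal usage sketch for nscan() (not from the original source; ``det``,
# ``m1``, and ``m2`` are assumed, pre-configured ophyd objects and ``RE`` is a
# bluesky RunEngine):
#
# RE(nscan([det], m1, -1.0, 1.0, m2, -0.1, 0.1, num=11))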
def snapshot(obj_list, stream="primary", md=None):
"""
bluesky plan: record current values of list of ophyd signals
PARAMETERS
obj_list : list
list of ophyd Signal or EpicsSignal objects
stream : str
document stream, default: "primary"
md : dict
metadata
"""
from .__init__ import __version__
import bluesky
import databroker
import epics
from ophyd import EpicsSignal
import socket
import getpass
objects = []
for obj in obj_list:
# TODO: consider supporting Device objects
if isinstance(obj, (Signal, EpicsSignal)) and obj.connected:
objects.append(obj)
else:
if hasattr(obj, "pvname"):
nm = obj.pvname
else:
nm = obj.name
print(f"ignoring object: {nm}")
if len(objects) == 0:
raise ValueError("No signals to log.")
hostname = socket.gethostname() or 'localhost'
username = getpass.getuser() or 'bluesky_user'
# we want this metadata to appear
_md = dict(
plan_name = "snapshot",
plan_description = "archive snapshot of ophyd Signals (usually EPICS PVs)",
iso8601 = str(datetime.datetime.now()), # human-readable
hints = {},
software_versions = dict(
python = sys.version,
PyEpics = epics.__version__,
bluesky = bluesky.__version__,
ophyd = ophyd.__version__,
databroker = databroker.__version__,
APS_Bluesky_Tools = __version__,),
hostname = hostname,
username = username,
login_id = f"{username}@{hostname}",
)
# caller may have given us additional metadata
_md.update(md or {})
def _snap(md=None):
yield from bps.open_run(md)
yield from bps.create(name=stream)
for obj in objects:
# passive observation: DO NOT TRIGGER, only read
yield from bps.read(obj)
yield from bps.save()
yield from bps.close_run()
return (yield from _snap(md=_md))
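# A minimal usage sketch for snapshot() (the PV name and metadata values are
# illustrative assumptions; ``RE`` is a bluesky RunEngine):
#
# from ophyd import EpicsSignal
# ring_current = EpicsSignal("S:SRcurrentAI", name="ring_current")
# RE(snapshot([ring_current], md=dict(purpose="baseline check")))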
# def sscan(*args, md=None, **kw): # TODO: planned
# """
# gather data form the sscan record and emit documents
#
# Should this operate a complete scan using the sscan record?
# """
# raise NotImplemented("this is only planned")
class TuneAxis(object):
"""
tune an axis with a signal
This class provides a tuning object so that a Device or other entity
may gain its own tuning process, keeping track of the particulars
needed to tune this device again. For example, one could add
a tuner to a motor stage::
motor = EpicsMotor("xxx:motor", "motor")
motor.tuner = TuneAxis([det], motor)
Then the ``motor`` could be tuned individually::
RE(motor.tuner.tune(md={"activity": "tuning"}))
or the :meth:`tune()` could be part of a plan with other steps.
Example::
tuner = TuneAxis([det], axis)
live_table = LiveTable(["axis", "det"])
RE(tuner.multi_pass_tune(width=2, num=9), live_table)
RE(tuner.tune(width=0.05, num=9), live_table)
Also see the jupyter notebook referenced here:
:ref:`example_tuneaxis`.
.. autosummary::
~tune
~multi_pass_tune
~peak_detected
"""
_peak_choices_ = "cen com".split()
def __init__(self, signals, axis, signal_name=None):
self.signals = signals
self.signal_name = signal_name or signals[0].name
self.axis = axis
self.stats = {}
self.tune_ok = False
self.peaks = None
self.peak_choice = self._peak_choices_[0]
self.center = None
self.stats = []
# defaults
self.width = 1
self.num = 10
self.step_factor = 4
self.pass_max = 6
self.snake = True
def tune(self, width=None, num=None, md=None):
"""
BlueSky plan to execute one pass through the current scan range
Scan self.axis centered about current position from
``-width/2`` to ``+width/2`` with ``num`` observations.
        If a peak was detected (default check is that max > 4*min),
then set ``self.tune_ok = True``.
PARAMETERS
width : float
width of the tuning scan in the units of ``self.axis``
Default value in ``self.width`` (initially 1)
num : int
number of steps
Default value in ``self.num`` (initially 10)
md : dict, optional
metadata
"""
width = width or self.width
num = num or self.num
if self.peak_choice not in self._peak_choices_:
msg = "peak_choice must be one of {}, geave {}"
msg = msg.format(self._peak_choices_, self.peak_choice)
raise ValueError(msg)
initial_position = self.axis.position
final_position = initial_position # unless tuned
start = initial_position - width/2
finish = initial_position + width/2
self.tune_ok = False
tune_md = dict(
width = width,
initial_position = self.axis.position,
time_iso8601 = str(datetime.datetime.now()),
)
_md = {'tune_md': tune_md,
'plan_name': self.__class__.__name__ + '.tune',
'tune_parameters': dict(
num = num,
width = width,
initial_position = self.axis.position,
peak_choice = self.peak_choice,
x_axis = self.axis.name,
y_axis = self.signal_name,
),
'hints': dict(
dimensions = [
(
[self.axis.name],
'primary')]
)
}
_md.update(md or {})
if "pass_max" not in _md:
self.stats = []
self.peaks = PeakStats(x=self.axis.name, y=self.signal_name)
class Results(Device):
"""because bps.read() needs a Device or a Signal)"""
tune_ok = Component(Signal)
initial_position = Component(Signal)
final_position = Component(Signal)
center = Component(Signal)
# - - - - -
x = Component(Signal)
y = Component(Signal)
cen = Component(Signal)
com = Component(Signal)
fwhm = Component(Signal)
min = Component(Signal)
max = Component(Signal)
crossings = Component(Signal)
peakstats_attrs = "x y cen com fwhm min max crossings".split()
def report(self):
keys = self.peakstats_attrs + "tune_ok center initial_position final_position".split()
for key in keys:
print("{} : {}".format(key, getattr(self, key).value))
@bpp.subs_decorator(self.peaks)
def _scan(md=None):
yield from bps.open_run(md)
position_list = np.linspace(start, finish, num)
signal_list = list(self.signals)
signal_list += [self.axis,]
for pos in position_list:
yield from bps.mv(self.axis, pos)
yield from bps.trigger_and_read(signal_list)
final_position = initial_position
if self.peak_detected():
self.tune_ok = True
if self.peak_choice == "cen":
final_position = self.peaks.cen
elif self.peak_choice == "com":
final_position = self.peaks.com
else:
final_position = None
self.center = final_position
# add stream with results
# yield from add_results_stream()
stream_name = "PeakStats"
results = Results(name=stream_name)
for key in "tune_ok center".split():
getattr(results, key).put(getattr(self, key))
results.final_position.put(final_position)
results.initial_position.put(initial_position)
for key in results.peakstats_attrs:
v = getattr(self.peaks, key)
if key in ("crossings", "min", "max"):
v = np.array(v)
getattr(results, key).put(v)
yield from bps.create(name=stream_name)
yield from bps.read(results)
yield from bps.save()
yield from bps.mv(self.axis, final_position)
self.stats.append(self.peaks)
yield from bps.close_run()
results.report()
return (yield from _scan(md=_md))
def multi_pass_tune(self, width=None, step_factor=None,
num=None, pass_max=None, snake=None, md=None):
"""
BlueSky plan for tuning this axis with this signal
Execute multiple passes to refine the centroid determination.
Each subsequent pass will reduce the width of scan by ``step_factor``.
If ``snake=True`` then the scan direction will reverse with
each subsequent pass.
PARAMETERS
width : float
width of the tuning scan in the units of ``self.axis``
Default value in ``self.width`` (initially 1)
num : int
number of steps
Default value in ``self.num`` (initially 10)
step_factor : float
This reduces the width of the next tuning scan by the given factor.
Default value in ``self.step_factor`` (initially 4)
pass_max : int
Maximum number of passes to be executed (avoids runaway
scans when a centroid is not found).
Default value in ``self.pass_max`` (initially 10)
snake : bool
If ``True``, reverse scan direction on next pass.
Default value in ``self.snake`` (initially True)
md : dict, optional
metadata
"""
width = width or self.width
num = num or self.num
step_factor = step_factor or self.step_factor
snake = snake or self.snake
pass_max = pass_max or self.pass_max
self.stats = []
def _scan(width=1, step_factor=10, num=10, snake=True):
for _pass_number in range(pass_max):
_md = {'pass': _pass_number+1,
'pass_max': pass_max,
'plan_name': self.__class__.__name__ + '.multi_pass_tune',
}
_md.update(md or {})
yield from self.tune(width=width, num=num, md=_md)
if not self.tune_ok:
return
width /= step_factor
if snake:
width *= -1
return (
yield from _scan(
width=width, step_factor=step_factor, num=num, snake=snake))
def peak_detected(self):
"""
returns True if a peak was detected, otherwise False
The default algorithm identifies a peak when the maximum
value is four times the minimum value. Change this routine
by subclassing :class:`TuneAxis` and override :meth:`peak_detected`.
"""
if self.peaks is None:
return False
self.peaks.compute()
if self.peaks.max is None:
return False
ymax = self.peaks.max[-1]
ymin = self.peaks.min[-1]
return ymax > 4*ymin # this works for USAXS@APS
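# A minimal sketch of customizing the peak test by subclassing TuneAxis
# (the 2x max/min threshold here is an assumption, not part of this package):
#
# class ForgivingTuneAxis(TuneAxis):
#     """TuneAxis with a more permissive peak test"""
#     def peak_detected(self):
#         if self.peaks is None:
#             return False
#         self.peaks.compute()
#         if self.peaks.max is None:
#             return False
#         return self.peaks.max[-1] > 2 * self.peaks.min[-1]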
def tune_axes(axes):
"""
BlueSky plan to tune a list of axes in sequence
EXAMPLE
Sequentially, tune a list of preconfigured axes::
        RE(tune_axes([mr, m2r, ar, a2r]))
"""
for axis in axes:
yield from axis.tune()
class ProcedureRegistry(ophyd.Device):
"""
Procedure Registry
.. caution:: This Device may be relocated or removed entirely in future releases.
Its use is complicated and could lead to instability.
With many instruments, such as USAXS, there are several operating
modes to be used, each with its own setup code. This ophyd Device
should coordinate those modes so that the setup procedures can be called
either as part of a Bluesky plan or from the command line directly.
Assumes that users will write functions to setup a particular
operation or operating mode. The user-written functions may not
be appropriate to use in a plan directly since they might
make blocking calls. The ProcedureRegistry will call the function
in a thread (which is allowed to make blocking calls) and wait
for the thread to complete.
It is assumed that each user-written function will not return until
it is complete.
.. autosummary::
~dir
~add
~remove
~set
~put
EXAMPLE::
use_mode = ProcedureRegistry(name="use_mode")
def clearScalerNames():
for ch in scaler.channels.configuration_attrs:
if ch.find(".") < 0:
chan = scaler.channels.__getattribute__(ch)
chan.chname.put("")
def setMyScalerNames():
scaler.channels.chan01.chname.put("clock")
scaler.channels.chan02.chname.put("I0")
scaler.channels.chan03.chname.put("detector")
def useMyScalerNames(): # Bluesky plan
yield from bps.mv(
m1, 5,
use_mode, "clear",
)
yield from bps.mv(
m1, 0,
use_mode, "set",
)
def demo():
print(1)
m1.move(5)
print(2)
time.sleep(2)
print(3)
m1.move(0)
print(4)
use_mode.add(demo)
use_mode.add(clearScalerNames, "clear")
use_mode.add(setMyScalerNames, "set")
# use_mode.set("demo")
# use_mode.set("clear")
# RE(useMyScalerNames())
"""
busy = ophyd.Component(ophyd.Signal, value=False)
registry = {}
delay_s = 0
timeout_s = None
state = "__created__"
@property
def dir(self):
"""tuple of procedure names"""
return tuple(sorted(self.registry.keys()))
def add(self, procedure, proc_name=None):
"""
add procedure to registry
"""
#if procedure.__class__ == "function":
nm = proc_name or procedure.__name__
self.registry[nm] = procedure
def remove(self, procedure):
"""
remove procedure from registry
"""
if isinstance(procedure, str):
nm = procedure
else:
nm = procedure.__name__
if nm in self.registry:
del self.registry[nm]
def set(self, proc_name):
"""
run procedure in a thread, return once it is complete
proc_name (str) : name of registered procedure to be run
"""
if not isinstance(proc_name, str):
raise ValueError("expected a procedure name, not {}".format(proc_name))
if proc_name not in self.registry:
raise KeyError("unknown procedure name: "+proc_name)
if self.busy.value:
raise RuntimeError("busy now")
self.state = "__busy__"
status = ophyd.DeviceStatus(self)
@run_in_thread
def run_and_delay():
self.busy.put(True)
self.registry[proc_name]()
# optional delay
if self.delay_s > 0:
time.sleep(self.delay_s)
self.busy.put(False)
status._finished(success=True)
run_and_delay()
ophyd.status.wait(status, timeout=self.timeout_s)
self.state = proc_name
return status
def put(self, value): # TODO: risky?
"""replaces ophyd Device default put() behavior"""
self.set(value) | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/APS_BlueSky_tools/plans.py | plans.py |
import datetime
import logging
import pyRestTable
from bluesky.callbacks.core import CallbackBase
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def document_contents_callback(key, doc):
"""
prints document contents -- use for diagnosing a document stream
"""
print(key)
for k, v in doc.items():
print(f"\t{k}\t{v}")
class DocumentCollectorCallback(object):
"""
BlueSky callback to collect *all* documents from most-recent plan
Will reset when it receives a *start* document.
EXAMPLE::
        from APS_BlueSky_tools.callbacks import DocumentCollectorCallback
doc_collector = DocumentCollectorCallback()
RE.subscribe(doc_collector.receiver)
...
RE(some_plan())
print(doc_collector.uids)
print(doc_collector.documents["stop"])
"""
data_event_names = "descriptor event resource datum bulk_events".split()
def __init__(self):
self.documents = {} # key: name, value: document
self.uids = [] # chronological list of UIDs as-received
def receiver(self, key, document):
"""keep all documents from recent plan in memory"""
uid = document.get("uid") or document.get("datum_id")
if "uid" is None:
raise KeyError("No uid in '{}' document".format(key))
self.uids.append(uid)
logger = logging.getLogger(__name__)
logger.debug("%s document uid=%s", key, str(uid))
if key == "start":
self.documents = {key: document}
elif key in self.data_event_names:
if key not in self.documents:
self.documents[key] = []
self.documents[key].append(document)
elif key == "stop":
self.documents[key] = document
print("exit status:", document["exit_status"])
for item in self.data_event_names:
if item in self.documents:
print(
"# {}(s):".format(item),
len(self.documents[item])
)
else:
txt = "custom_callback encountered: %s\n%s"
logger.warning(txt, key, document)
if key not in self.documents:
self.documents[key] = []
self.documents[key].append(document)
return
class SnapshotReport(CallbackBase):
"""
    show the data from an ``APS_BlueSky_tools.plans.snapshot()``
Find most recent snapshot between certain dates::
headers = db(plan_name="snapshot", since="2018-12-15", until="2018-12-21")
h = list(headers)[0] # pick the first one, it's the most recent
        APS_BlueSky_tools.callbacks.SnapshotReport().print_report(h)
Use as callback to a snapshot plan::
RE(
            APS_BlueSky_tools.plans.snapshot(ophyd_objects_list),
            APS_BlueSky_tools.callbacks.SnapshotReport()
)
"""
xref = None
def start(self, doc):
if doc.get("plan_name", "nope") == "snapshot":
self.xref = {} # key=source, value=dict(value, iso8601 timestamp)
else:
self.xref = None
def descriptor(self, doc):
"""
special case:
the data is both in the descriptor AND the event docs
due to the way our plan created it
"""
if self.xref is None: # not from a snapshot plan
return
for k, v in doc["configuration"].items():
ts = v["timestamps"][k]
dt = datetime.datetime.fromtimestamp(ts).isoformat().replace("T", " ")
pvname = v["data_keys"][k]["source"]
value = v["data"][k]
self.xref[pvname] = dict(value=value, timestamp=dt)
def stop(self, doc):
if self.xref is None: # not from a snapshot plan
return
t = pyRestTable.Table()
t.addLabel("timestamp")
t.addLabel("source")
t.addLabel("name")
t.addLabel("value")
for k, v in sorted(self.xref.items()):
p = k.find(":")
t.addRow((v["timestamp"], k[:p], k[p+1:], v["value"]))
print(t)
for k, v in sorted(doc.items()):
print(f"{k}: {v}")
def print_report(self, header):
"""
simplify the job of writing our custom data table
method: play the entire document stream through this callback
"""
print()
print("="*40)
print("snapshot:", header.start["iso8601"])
print("="*40)
print()
for k, v in sorted(header.start.items()):
print(f"{k}: {v}")
print()
for key, doc in header.documents():
self(key, doc)
print() | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/APS_BlueSky_tools/callbacks.py | callbacks.py |
from collections import OrderedDict
from datetime import datetime
import getpass
import logging
import os
import socket
import time
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# Programmer's Note: subclassing from `object` avoids the need
# to import `bluesky.callbacks.core.CallbackBase`.
# One less import when only accessing the Databroker.
# The *only* advantage to subclassing from CallbackBase
# seems to be a simpler setup call to RE.subscribe().
#
# superclass | subscription code
# ------------ | -------------------------------
# object | RE.subscribe(specwriter.receiver)
# CallbackBase | RE.subscribe(specwriter)
SPEC_TIME_FORMAT = "%a %b %d %H:%M:%S %Y"
SCAN_ID_RESET_VALUE = 1
def _rebuild_scan_command(doc):
"""reconstruct the scan command for SPEC data file #S line"""
def get_name(src):
"""
get name field from object representation
given: EpicsMotor(prefix='xxx:m1', name='m1', settle_time=0.0,
timeout=None, read_attrs=['user_readback', 'user_setpoint'],
configuration_attrs=['motor_egu', 'velocity', 'acceleration',
'user_offset', 'user_offset_dir'])
return: "m1"
"""
s = str(src)
p = s.find("(")
if p > 0: # only if an open parenthesis is found
parts = s[p+1:].rstrip(")").split(",")
for item in parts:
# should be key=value pairs
item = item.strip()
p = item.find("=")
if item[:p] == "name":
s = item[p+1:] # get the name value
break
return s
s = []
if "plan_args" in doc:
for _k, _v in doc['plan_args'].items():
if _k == "detectors":
_v = doc[_k]
elif _k.startswith("motor"):
_v = doc["motors"]
elif _k == "args":
_v = "[" + ", ".join(map(get_name, _v)) + "]"
s.append("{}={}".format(_k, _v))
cmd = "{}({})".format(doc.get("plan_name", ""), ", ".join(s))
scan_id = doc.get("scan_id") or 1 # TODO: improve the `1` default
return "{} {}".format(scan_id, cmd)
class SpecWriterCallback(object):
"""
collect data from BlueSky RunEngine documents to write as SPEC data
This gathers data from all documents and appends scan to the file
when the *stop* document is received.
Parameters
filename : string, optional
Local, relative or absolute name of SPEC data file to be used.
        If `filename=None`, defaults to format of YYYYmmdd-HHMMSS.dat
derived from the current system time.
auto_write : boolean, optional
If True (default), `write_scan()` is called when *stop* document
is received.
If False, the caller is responsible for calling `write_scan()`
before the next *start* document is received.
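    EXAMPLE (a minimal sketch, assuming a bluesky RunEngine ``RE`` exists)::

        from APS_BlueSky_tools.filewriters import SpecWriterCallback
        specwriter = SpecWriterCallback()
        RE.subscribe(specwriter.receiver)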
User Interface methods
.. autosummary::
~receiver
~newfile
~usefile
~make_default_filename
~clear
~prepare_scan_contents
~write_scan
Internal methods
.. autosummary::
~write_header
~start
~descriptor
~event
~bulk_events
~datum
~resource
~stop
"""
def __init__(self, filename=None, auto_write=True):
self.clear()
self.spec_filename = filename
self.auto_write = auto_write
self.uid_short_length = 8
self.write_file_header = False
self.spec_epoch = None # for both #E & #D line in header, also offset for all scans
self.spec_host = None
self.spec_user = None
self._datetime = None # most recent document time as datetime object
self._streams = {} # descriptor documents, keyed by uid
if filename is None or not os.path.exists(filename):
self.newfile(filename)
else:
last_scan_id = self.usefile(filename) # TODO: set RE's scan_id based on result?
def clear(self):
"""reset all scan data defaults"""
self.uid = None
self.scan_epoch = None # absolute epoch to report in scan #D line
self.time = None # full time from document
self.comments = dict(start=[], event=[], descriptor=[], resource=[], datum=[], stop=[])
self.data = OrderedDict() # data in the scan
self.detectors = OrderedDict() # names of detectors in the scan
self.hints = OrderedDict() # why?
self.metadata = OrderedDict() # #MD lines in header
self.motors = OrderedDict() # names of motors in the scan
self.positioners = OrderedDict() # names in #O, values in #P
self.num_primary_data = 0
#
# note: for one scan, #O & #P information is not provided
# unless collecting baseline data
# wait for case with baseline data that needs #O/#P lines
#
self.columns = OrderedDict() # #L in scan
self.scan_command = None # #S line
def _cmt(self, key, text):
"""enter a comment"""
ts = datetime.strftime(self._datetime, SPEC_TIME_FORMAT)
self.comments[key].append("{}. {}".format(ts, text))
def receiver(self, key, document):
"""BlueSky callback: receive all documents for handling"""
xref = dict(
start = self.start,
descriptor = self.descriptor,
event = self.event,
bulk_events = self.bulk_events,
datum = self.datum,
resource = self.resource,
stop = self.stop,
)
logger = logging.getLogger(__name__)
if key in xref:
uid = document.get("uid") or document.get("datum_id")
logger.debug("%s document, uid=%s", key, str(uid))
ts = document.get("time")
if ts is None:
ts = datetime.now()
else:
ts = datetime.fromtimestamp(document["time"])
self._datetime = ts
xref[key](document)
else:
msg = "custom_callback encountered: {} : {}".format(key, document)
# raise ValueError(msg)
logger.warning(msg)
def start(self, doc):
"""handle *start* documents"""
known_properties = """
uid time project sample scan_id group owner
detectors hints
plan_type plan_name plan_args
""".split()
self.clear()
self.uid = doc["uid"]
self._cmt("start", "uid = {}".format(self.uid))
self.time = doc["time"]
self.scan_epoch = int(self.time)
self.scan_id = doc["scan_id"] or 0
# Which reference? fixed counting time or fixed monitor count?
# Can this be omitted?
self.T_or_M = None # for now
# self.T_or_M = "T" # TODO: how to get this from the document stream?
# self.T_or_M_value = 1
# self._cmt("start", "!!! #T line not correct yet !!!")
# metadata
for key in sorted(doc.keys()):
if key not in known_properties:
self.metadata[key] = doc[key]
# various dicts
for item in "detectors hints motors".split():
if item in doc:
obj = self.__getattribute__(item)
for key in doc.get(item):
obj[key] = None
cmt = "plan_type = " + doc["plan_type"]
ts = datetime.strftime(self._datetime, SPEC_TIME_FORMAT)
self.comments["start"].insert(0, "{}. {}".format(ts, cmt))
self.scan_command = _rebuild_scan_command(doc)
def descriptor(self, doc):
"""
handle *descriptor* documents
prepare for primary scan data, ignore any other data stream
"""
# TODO: log descriptor documents by uid
# for reference from event and bulk_events documents
if doc["uid"] in self._streams:
fmt = "duplicate descriptor UID {} found"
raise KeyError(fmt.format(doc["uid"]))
# log descriptor documents by uid
# referenced by event and bulk_events documents
self._streams[doc["uid"]] = doc
if doc["name"] == "primary":
doc_data_keys = list(doc["data_keys"].keys())
self.data.update({k: [] for k in sorted(doc_data_keys)})
self.data["Epoch"] = []
self.data["Epoch_float"] = []
# SPEC data files have implied defaults
# SPEC default: X axis in 1st column and Y axis in last column
_at_last = len(self.motors) > 0
self.data.move_to_end("Epoch_float", last=_at_last)
self.data.move_to_end("Epoch")
# TODO: move motors to first
# TODO: move detectors to last
if len(self.motors) > 0:
# find 1st motor and move to last
motor_name = list(self.motors.keys())[0]
self.data.move_to_end(motor_name, last=False)
# monitor (detector) in next to last position
# but how can we get that name here?
if len(self.detectors) > 0:
# find 1st detector and move to last
det_name = list(self.detectors.keys())[0]
if det_name not in self.data and len(doc_data_keys) > 0:
det_name = doc_data_keys[0]
if det_name in self.data:
self.data.move_to_end(det_name)
def event(self, doc):
"""
handle *event* documents
"""
stream_doc = self._streams.get(doc["descriptor"])
if stream_doc is None:
fmt = "descriptor UID {} not found"
raise KeyError(fmt.format(doc["descriptor"]))
if stream_doc["name"] == "primary":
for k in doc["data"].keys():
if k not in self.data.keys():
fmt = "unexpected failure here, key {} not found"
raise KeyError(fmt.format(k))
#return # not our expected event data
for k in self.data.keys():
if k == "Epoch":
v = int(doc["time"] - self.time + 0.5)
elif k == "Epoch_float":
v = doc["time"] - self.time
else:
v = doc["data"][k]
self.data[k].append(v)
self.num_primary_data += 1
def bulk_events(self, doc):
"""handle *bulk_events* documents"""
pass
def datum(self, doc):
"""handle *datum* documents"""
self._cmt("datum", "datum " + str(doc))
def resource(self, doc):
"""handle *resource* documents"""
self._cmt("resource", "resource " + str(doc))
def stop(self, doc):
"""handle *stop* documents"""
if "num_events" in doc:
for k, v in doc["num_events"].items():
self._cmt("stop", "num_events_{} = {}".format(k, v))
if "exit_status" in doc:
self._cmt("stop", "exit_status = " + doc["exit_status"])
else:
self._cmt("stop", "exit_status = not available")
if self.auto_write:
self.write_scan()
def prepare_scan_contents(self):
"""
format the scan for a SPEC data file
:returns: [str] a list of lines to append to the data file
"""
dt = datetime.fromtimestamp(self.scan_epoch)
lines = []
lines.append("")
lines.append("#S " + self.scan_command)
lines.append("#D " + datetime.strftime(dt, SPEC_TIME_FORMAT))
if self.T_or_M is not None:
lines.append("#{} {}".format(self.T_or_M, self.T_or_M_value))
for v in self.comments["start"]:
#C Wed Feb 03 16:51:38 2016. do ./usaxs.mac.
lines.append("#C " + v) # TODO: add time/date stamp as SPEC does
for v in self.comments["descriptor"]:
lines.append("#C " + v)
for k, v in self.metadata.items():
# "#MD" is our ad hoc SPEC data tag
lines.append("#MD {} = {}".format(k, v))
lines.append("#N " + str(self.num_primary_data))
if len(self.data.keys()) > 0:
lines.append("#L " + " ".join(self.data.keys()))
for i in range(self.num_primary_data):
str_data = OrderedDict()
s = []
for k in self.data.keys():
datum = self.data[k][i]
if isinstance(datum, str):
# SPEC scan data is expected to be numbers
# this is text, substitute the row number
# and report after this line in a #U line
str_data[k] = datum
datum = i
s.append(str(datum))
lines.append(" ".join(s))
for k in str_data.keys():
# report the text data
lines.append("#U {} {} {}".format(i, k, str_data[k]))
else:
lines.append("#C no data column labels identified")
for v in self.comments["event"]:
lines.append("#C " + v)
for v in self.comments["resource"]:
lines.append("#C " + v)
for v in self.comments["datum"]:
lines.append("#C " + v)
for v in self.comments["stop"]:
lines.append("#C " + v)
return lines
def _write_lines_(self, lines, mode="a"):
"""write (more) lines to the file"""
with open(self.spec_filename, mode) as f:
f.write("\n".join(lines))
def write_header(self):
"""write the header section of a SPEC data file"""
dt = datetime.fromtimestamp(self.spec_epoch)
lines = []
lines.append("#F " + self.spec_filename)
lines.append("#E " + str(self.spec_epoch))
lines.append("#D " + datetime.strftime(dt, SPEC_TIME_FORMAT))
lines.append("#C " + "BlueSky user = {} host = {}".format(self.spec_user, self.spec_host))
lines.append("")
if os.path.exists(self.spec_filename):
raise IOError("file ({}) exists".format(self.spec_filename))
self._write_lines_(lines, mode="w")
self.write_file_header = False
def write_scan(self):
"""
write the most recent (completed) scan to the file
* creates file if not existing
* writes header if needed
* appends scan data
note: does nothing if there are no lines to be written
"""
if os.path.exists(self.spec_filename):
with open(self.spec_filename) as f:
buf = f.read()
if buf.find(self.uid) >= 0:
# raise exception if uid is already in the file!
fmt = "{} already contains uid={}"
raise ValueError(fmt.format(self.spec_filename, self.uid))
logger = logging.getLogger(__name__)
lines = self.prepare_scan_contents()
lines.append("")
if lines is not None:
if self.write_file_header:
self.write_header()
logger.info("wrote header to SPEC file: %s", self.spec_filename)
self._write_lines_(lines, mode="a")
logger.info("wrote scan %d to SPEC file: %s", self.scan_id, self.spec_filename)
def make_default_filename(self):
"""generate a file name to be used as default"""
now = datetime.now()
return datetime.strftime(now, "%Y%m%d-%H%M%S")+".dat"
def newfile(self, filename=None, reset_scan_id=False, RE=None):
"""
prepare to use a new SPEC data file
but don't create it until we have data
"""
self.clear()
filename = filename or self.make_default_filename()
if os.path.exists(filename):
ValueError("file {} exists".format(filename))
self.spec_filename = filename
self.spec_epoch = int(time.time()) # ! no roundup here!!!
self.spec_host = socket.gethostname() or 'localhost'
self.spec_user = getpass.getuser() or 'BlueSkyUser'
self.write_file_header = True # don't write the file yet
if reset_scan_id and RE is not None:
# assume isinstance(RE, bluesky.run_engine.RunEngine)
RE.md["scan_id"] = SCAN_ID_RESET_VALUE
print("scan ID set to {}".format(SCAN_ID_RESET_VALUE))
return self.spec_filename
def usefile(self, filename):
"""read from existing SPEC data file"""
if not os.path.exists(self.spec_filename):
IOError("file {} does not exist".format(filename))
scan_id = None
with open(filename, "r") as f:
key = "#F"
line = f.readline().strip()
if not line.startswith(key+" "):
raise ValueError("first line does not start with "+key)
key = "#E"
line = f.readline().strip()
if not line.startswith(key+" "):
raise ValueError("first line does not start with "+key)
epoch = int(line.split()[-1])
key = "#D"
line = f.readline().strip()
if not line.startswith(key+" "):
raise ValueError("first line does not start with "+key)
# ignore content, it is derived from #E line
key = "#C"
line = f.readline().strip()
if not line.startswith(key+" "):
raise ValueError("first line does not start with "+key)
p = line.split()
username = "BlueSkyUser"
if len(p) > 4 and p[2] == "user":
username = p[4]
# find the last scan number used
key = "#S"
for line in f.readlines():
if line.startswith(key+" ") and len(line.split())>1:
scan_id = int(line.split()[1])
self.spec_filename = filename
self.spec_epoch = epoch
self.spec_user = username
return scan_id | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/APS_BlueSky_tools/filewriters.py | filewriters.py |
from collections import OrderedDict
from email.mime.text import MIMEText
import logging
import math
import os
import pandas
import pyRestTable
import smtplib
import subprocess
import time
HOME_PATH = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
def text_encode(source):
"""encode ``source`` using the default codepoint"""
return source.encode(errors='ignore')
def unix_cmd(command_list):
"""run a UNIX command, returns (stdout, stderr)"""
process = subprocess.Popen(command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
return stdout, stderr
def to_unicode_or_bust(obj, encoding='utf-8'):
"""from: http://farmdev.com/talks/unicode/"""
if isinstance(obj, str):
if not isinstance(obj, str):
obj = str(obj, encoding)
return obj
def connect_pvlist(pvlist, wait=True, timeout=2, poll_interval=0.1):
"""
given a list of EPICS PV names, return a dictionary of EpicsSignal objects
PARAMETERS
pvlist : list(str)
list of EPICS PV names
wait : bool
should wait for EpicsSignal objects to connect, default: True
timeout : float
maximum time to wait for PV connections, seconds, default: 2.0
poll_interval : float
time to sleep between checks for PV connections, seconds, default: 0.1
"""
from ophyd import EpicsSignal
obj_dict = OrderedDict()
for item in pvlist:
if len(item.strip()) == 0:
continue
pvname = item.strip()
oname = "signal_{}".format(len(obj_dict))
obj = EpicsSignal(pvname, name=oname)
obj_dict[oname] = obj
if wait:
times_up = time.time() + min(0, timeout)
poll_interval = min(0.01, poll_interval)
waiting = True
while waiting and time.time() < times_up:
time.sleep(poll_interval)
waiting = False in [o.connected for o in obj_dict.values()]
if waiting:
n = OrderedDict()
for k, v in obj_dict.items():
if v.connected:
n[k] = v
else:
print(f"Could not connect {v.pvname}")
if len(n) == 0:
raise RuntimeError("Could not connect any PVs in the list")
obj_dict = n
return obj_dict
class EmailNotifications(object):
"""
send email notifications when requested
use default OS mail utility (so no credentials needed)
"""
def __init__(self, sender=None):
self.addresses = []
self.notify_on_feedback = True
self.sender = sender or "nobody@localhost"
self.smtp_host = "localhost"
def add_addresses(self, *args):
for address in args:
self.addresses.append(address)
def send(self, subject, message):
"""send ``message`` to all addresses"""
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = self.sender
msg['To'] = ",".join(self.addresses)
s = smtplib.SMTP(self.smtp_host)
s.sendmail(self.sender, self.addresses, msg.as_string())
s.quit()
class ExcelDatabaseFileBase(object):
"""
base class: read-only support for Excel files, treat them like databases
EXAMPLE
Show how to read an Excel file where one of the columns
contains a unique key. This allows for random access to
each row of data by use of the *key*.
::
class ExhibitorsDB(ExcelDatabaseFileBase):
'''
content for Exhibitors, vendors, and Sponsors from the Excel file
'''
EXCEL_FILE = os.path.join("resources", "exhibitors.xlsx")
LABELS_ROW = 2
def handle_single_entry(self, entry):
'''any special handling for a row from the Excel file'''
pass
def handleExcelRowEntry(self, entry):
'''identify the unique key for this entry (row of the Excel file)'''
key = entry["Name"]
self.db[key] = entry
"""
EXCEL_FILE = None # subclass MUST define
# EXCEL_FILE = os.path.join("abstracts", "index of abstracts.xlsx")
LABELS_ROW = 3 # labels are on line LABELS_ROW+1 in the Excel file
def __init__(self):
self.db = OrderedDict()
self.data_labels = None
if self.EXCEL_FILE is None:
raise ValueError("subclass must define EXCEL_FILE")
self.fname = os.path.join(HOME_PATH, self.EXCEL_FILE)
self.parse()
def handle_single_entry(self, entry): # subclass MUST override
raise NotImplementedError("subclass must override handle_single_entry() method")
def handleExcelRowEntry(self, entry): # subclass MUST override
raise NotImplementedError("subclass must override handleExcelRowEntry() method")
def parse(self, labels_row_num=None, data_start_row_num=None):
labels_row_num = labels_row_num or self.LABELS_ROW
xl = pandas.read_excel(self.fname, sheet_name=0, header=None)
self.data_labels = list(xl.iloc[labels_row_num,:])
data_start_row_num = data_start_row_num or labels_row_num+1
grid = xl.iloc[data_start_row_num:,:]
# grid is a pandas DataFrame
# logger.info(type(grid))
# logger.info(grid.iloc[:,1])
for row_number, _ignored in enumerate(grid.iloc[:,0]):
row_data = grid.iloc[row_number,:]
entry = {}
for _col, label in enumerate(self.data_labels):
entry[label] = self._getExcelColumnValue(row_data, _col)
self.handle_single_entry(entry)
self.handleExcelRowEntry(entry)
def _getExcelColumnValue(self, row_data, col):
v = row_data.values[col]
if self._isExcel_nan(v):
v = None
else:
v = to_unicode_or_bust(v)
if isinstance(v, str):
v = v.strip()
return v
def _isExcel_nan(self, value):
if not isinstance(value, float):
return False
return math.isnan(value)
class ExcelDatabaseFileGeneric(ExcelDatabaseFileBase):
"""
Generic (read-only) handling of Excel spreadsheet-as-database
Table labels are given on Excel row ``N``, ``self.labels_row = N-1``
"""
def __init__(self, filename, labels_row=3):
self._index_ = 0
self.EXCEL_FILE = self.EXCEL_FILE or filename
self.LABELS_ROW = labels_row
ExcelDatabaseFileBase.__init__(self)
def handle_single_entry(self, entry):
pass
def handleExcelRowEntry(self, entry):
"""use row number as the unique key"""
key = str(self._index_)
self.db[key] = entry
self._index_ += 1
def ipython_profile_name():
"""
return the name of the current ipython profile or `None`
Example (add to default RunEngine metadata)::
RE.md['ipython_profile'] = str(ipython_profile_name())
print("using profile: " + RE.md['ipython_profile'])
"""
from IPython import get_ipython
return get_ipython().profile
def print_snapshot_list(db, **search_criteria):
"""
print (stdout) a list of all snapshots in the databroker
USAGE::
print_snapshot_list(db, )
print_snapshot_list(db, purpose="this is an example")
print_snapshot_list(db, since="2018-12-21", until="2019")
EXAMPLE::
In [16]: from APS_BlueSky_tools.utils import print_snapshot_list
...: from APS_BlueSky_tools.callbacks import SnapshotReport
...: print_snapshot_list(db, since="2018-12-21", until="2019")
...:
= ======== ========================== ==================
# uid date/time purpose
= ======== ========================== ==================
0 d7831dae 2018-12-21 11:39:52.956904 this is an example
1 5049029d 2018-12-21 11:39:30.062463 this is an example
2 588e0149 2018-12-21 11:38:43.153055 this is an example
= ======== ========================== ==================
In [17]: SnapshotReport().print_report(db["5049029d"])
========================================
snapshot: 2018-12-21 11:39:30.062463
========================================
example: example 2
hints: {}
iso8601: 2018-12-21 11:39:30.062463
look: can snapshot text and arrays too
note: no commas in metadata
plan_description: archive snapshot of ophyd Signals (usually EPICS PVs)
plan_name: snapshot
plan_type: generator
purpose: this is an example
scan_id: 1
software_versions: {
'python':
'''3.6.2 |Continuum Analytics, Inc.| (default, Jul 20 2017, 13:51:32)
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)]''',
'PyEpics': '3.3.1',
'bluesky': '1.4.1',
'ophyd': '1.3.0',
'databroker': '0.11.3',
'APS_Bluesky_Tools': '0.0.38'
}
time: 1545413970.063167
uid: 5049029d-075c-453c-96d2-55431273852b
========================== ====== ================ ===================
timestamp source name value
========================== ====== ================ ===================
2018-12-20 18:24:34.220028 PV compress [0.1, 0.2, 0.3]
2018-12-13 14:49:53.121188 PV gov:HOSTNAME otz.aps.anl.gov
2018-12-21 11:39:24.268148 PV gov:IOC_CPU_LOAD 0.22522317161410768
2018-12-21 11:39:24.268151 PV gov:SYS_CPU_LOAD 9.109026666525944
2018-12-21 11:39:30.017643 PV gov:iso8601 2018-12-21T11:39:30
2018-12-13 14:49:53.135016 PV otz:HOSTNAME otz.aps.anl.gov
2018-12-21 11:39:27.705304 PV otz:IOC_CPU_LOAD 0.1251210270549924
2018-12-21 11:39:27.705301 PV otz:SYS_CPU_LOAD 11.611234438304471
2018-12-21 11:39:30.030321 PV otz:iso8601 2018-12-21T11:39:30
========================== ====== ================ ===================
exit_status: success
num_events: {'primary': 1}
run_start: 5049029d-075c-453c-96d2-55431273852b
time: 1545413970.102147
uid: 6c1b2100-1ef6-404d-943e-405da9ada882
"""
t = pyRestTable.Table()
t.addLabel("#")
t.addLabel("uid")
t.addLabel("date/time")
t.addLabel("purpose")
search_criteria["plan_name"] = "snapshot"
for i, h in enumerate(db(**search_criteria)):
uid = h.start["uid"].split("-")[0]
t.addRow((i, uid, h.start["iso8601"], h.start["purpose"]))
print(t) | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/APS_BlueSky_tools/utils.py | utils.py |
import logging
import ophyd.sim
import numpy as np
logger = logging.getLogger(__name__).addHandler(logging.NullHandler())
class SynPseudoVoigt(ophyd.sim.SynSignal):
"""
Evaluate a point on a pseudo-Voigt based on the value of a motor.
Provides a signal to be measured.
Acts like a detector.
:see: https://en.wikipedia.org/wiki/Voigt_profile
PARAMETERS
name : str
name of detector signal
motor : `Mover`
The independent coordinate
motor_field : str
name of `Mover` field
center : float, optional
location of maximum value, default=0
eta : float, optional
0 <= eta < 1.0: Lorentzian fraction, default=0.5
scale : float, optional
scale >= 1 : scale factor, default=1
sigma : float, optional
sigma > 0 : width, default=1
bkg : float, optional
bkg >= 0 : constant background, default=0
noise : {'poisson', 'uniform', None}
Add noise to the result.
noise_multiplier : float
Only relevant for 'uniform' noise. Multiply the random amount of
noise by 'noise_multiplier'
EXAMPLE
::
from APS_BlueSky_tools.signals import SynPseudoVoigt
motor = Mover('motor', {'motor': lambda x: x}, {'x': 0})
det = SynPseudoVoigt('det', motor, 'motor',
center=0, eta=0.5, scale=1, sigma=1, bkg=0)
EXAMPLE
::
import numpy as np
from APS_BlueSky_tools.signals import SynPseudoVoigt
synthetic_pseudovoigt = SynPseudoVoigt(
'synthetic_pseudovoigt', m1, 'm1',
center=-1.5 + 0.5*np.random.uniform(),
eta=0.2 + 0.5*np.random.uniform(),
sigma=0.001 + 0.05*np.random.uniform(),
scale=1e5,
bkg=0.01*np.random.uniform())
# RE(bp.scan([synthetic_pseudovoigt], m1, -2, 0, 219))
"""
def __init__(self, name, motor, motor_field, center=0,
eta=0.5, scale=1, sigma=1, bkg=0,
noise=None, noise_multiplier=1,
**kwargs):
if eta < 0.0 or eta > 1.0:
raise ValueError("eta={} must be between 0 and 1".format(eta))
if scale < 1.0:
raise ValueError("scale must be >= 1")
if sigma <= 0.0:
raise ValueError("sigma must be > 0")
if bkg < 0.0:
raise ValueError("bkg must be >= 0")
# remember these terms for later access by user
self.name = name
self.motor = motor
self.center = center
self.eta = eta
self.scale = scale
self.sigma = sigma
self.bkg = bkg
self.noise = noise
self.noise_multiplier = noise_multiplier
def f_lorentzian(x, gamma):
#return gamma / np.pi / (x**2 + gamma**2)
return 1 / np.pi / gamma / (1 + (x/gamma)**2)
def f_gaussian(x, sigma):
numerator = np.exp(-0.5 * (x / sigma) ** 2)
denominator = sigma * np.sqrt(2 * np.pi)
return numerator / denominator
def pvoigt():
m = motor.read()[motor_field]['value']
g_max = f_gaussian(0, sigma) # peak normalization
l_max = f_lorentzian(0, sigma)
v = bkg
if eta > 0:
v += eta * f_lorentzian(m - center, sigma) / l_max
if eta < 1:
v += (1-eta) * f_gaussian(m - center, sigma) / g_max
v *= scale
if noise == 'poisson':
v = int(np.random.poisson(np.round(v), 1))
elif noise == 'uniform':
v += np.random.uniform(-1, 1) * noise_multiplier
return v
super().__init__(name=name, func=pvoigt, **kwargs) | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/APS_BlueSky_tools/signals.py | signals.py |
# Copyright (c) 2017-2018, UChicago Argonne, LLC. See LICENSE file.
from collections import OrderedDict
from datetime import datetime
import epics
import itertools
import numpy as np
import threading
import time
from .synApps_ophyd import *
from . import plans as APS_plans
import ophyd
from ophyd import Component, Device, DeviceStatus, FormattedComponent
from ophyd import Signal, EpicsMotor, EpicsSignal, EpicsSignalRO
from ophyd.scaler import EpicsScaler, ScalerCH
from ophyd.areadetector.filestore_mixins import FileStoreHDF5
from ophyd.areadetector.filestore_mixins import FileStoreBase
from ophyd.areadetector.filestore_mixins import FileStorePluginBase
from ophyd.areadetector.filestore_mixins import FileStoreIterativeWrite
from ophyd import HDF5Plugin
from ophyd.utils import set_and_wait
def use_EPICS_scaler_channels(scaler):
"""
configure scaler for only the channels with names assigned in EPICS
"""
if isinstance(scaler, EpicsScaler):
import epics
read_attrs = []
for ch in scaler.channels.component_names:
_nam = epics.caget("{}.NM{}".format(scaler.prefix, int(ch[4:])))
if len(_nam.strip()) > 0:
read_attrs.append(ch)
scaler.channels.read_attrs = read_attrs
elif isinstance(scaler, ScalerCH):
read_attrs = []
for ch in scaler.channels.component_names:
nm_pv = scaler.channels.__getattribute__(ch)
if nm_pv is not None and len(nm_pv.chname.value.strip()) > 0:
read_attrs.append(ch)
scaler.channels.read_attrs = read_attrs
class ApsOperatorMessagesDevice(Device):
"""general messages from the APS main control room"""
operators = Component(EpicsSignalRO, "OPS:message1", string=True)
floor_coordinator = Component(EpicsSignalRO, "OPS:message2", string=True)
fill_pattern = Component(EpicsSignalRO, "OPS:message3", string=True)
last_problem_message = Component(EpicsSignalRO, "OPS:message4", string=True)
last_trip_message = Component(EpicsSignalRO, "OPS:message5", string=True)
# messages 6-8: meaning?
message6 = Component(EpicsSignalRO, "OPS:message6", string=True)
message7 = Component(EpicsSignalRO, "OPS:message7", string=True)
message8 = Component(EpicsSignalRO, "OPS:message8", string=True)
class ApsMachineParametersDevice(Device):
"""
common operational parameters of the APS of general interest
USAGE::
import APS_BlueSky_tools.devices as APS_devices
APS = APS_devices.ApsMachineParametersDevice(name="APS")
aps_current = APS.current
# make sure these values are logged at start and stop of every scan
sd.baseline.append(APS)
# record storage ring current as secondary stream during scans
# name: aps_current_monitor
# db[-1].table("aps_current_monitor")
sd.monitors.append(aps_current)
The `sd.baseline` and `sd.monitors` usage relies on this global setup:
from bluesky import SupplementalData
sd = SupplementalData()
RE.preprocessors.append(sd)
.. autosummary::
~inUserOperations
"""
current = Component(EpicsSignalRO, "S:SRcurrentAI")
lifetime = Component(EpicsSignalRO, "S:SRlifeTimeHrsCC")
machine_status = Component(EpicsSignalRO, "S:DesiredMode", string=True)
# In [3]: APS.machine_status.enum_strs
# Out[3]:
# ('State Unknown',
# 'USER OPERATIONS',
# 'Bm Ln Studies',
# 'INJ Studies',
# 'ASD Studies',
# 'NO BEAM',
# 'MAINTENANCE')
operating_mode = Component(EpicsSignalRO, "S:ActualMode", string=True)
# In [4]: APS.operating_mode.enum_strs
# Out[4]:
# ('State Unknown',
# 'NO BEAM',
# 'Injecting',
# 'Stored Beam',
# 'Delivered Beam',
# 'MAINTENANCE')
shutter_permit = Component(EpicsSignalRO, "ACIS:ShutterPermit", string=True)
fill_number = Component(EpicsSignalRO, "S:FillNumber")
orbit_correction = Component(EpicsSignalRO, "S:OrbitCorrection:CC")
global_feedback = Component(EpicsSignalRO, "SRFB:GBL:LoopStatusBI", string=True)
global_feedback_h = Component(EpicsSignalRO, "SRFB:GBL:HLoopStatusBI", string=True)
global_feedback_v = Component(EpicsSignalRO, "SRFB:GBL:VLoopStatusBI", string=True)
operator_messages = Component(ApsOperatorMessagesDevice)
@property
def inUserOperations(self):
"""
determine if APS is in User Operations mode (boolean)
Use this property to configure ophyd Devices for direct or simulated hardware.
See issue #49 (https://github.com/BCDA-APS/APS_BlueSky_tools/issues/49) for details.
EXAMPLE::
APS = APS_BlueSky_tools.devices.ApsMachineParametersDevice(name="APS")
if APS.inUserOperations:
suspend_APS_current = bluesky.suspenders.SuspendFloor(APS.current, 2, resume_thresh=10)
RE.install_suspender(suspend_APS_current)
else:
# use pseudo shutter controls and no current suspenders
pass
"""
verdict = self.machine_status.value in (1, "USER OPERATIONS")
# verdict = verdict and self.operating_mode.value not in (5, "MAINTENANCE")
return verdict
class ApsPssShutter(Device):
"""
APS PSS shutter
* APS PSS shutters have separate bit PVs for open and close
* set either bit, the shutter moves, and the bit resets a short time later
* no indication that the shutter has actually moved from the bits
(see :func:`ApsPssShutterWithStatus()` for alternative)
USAGE::
shutter_a = ApsPssShutter("2bma:A_shutter", name="shutter")
shutter_a.open()
shutter_a.close()
shutter_a.set("open")
shutter_a.set("close")
When using the shutter in a plan, be sure to use ``yield from``.
::
def in_a_plan(shutter):
yield from abs_set(shutter, "open", wait=True)
# do something
yield from abs_set(shutter, "close", wait=True)
RE(in_a_plan(shutter_a))
The strings accepted by `set()` are defined in two lists:
`valid_open_values` and `valid_close_values`. These lists
are treated (internally to `set()`) as lower case strings.
Example, add "o" & "x" as aliases for "open" & "close":
shutter_a.valid_open_values.append("o")
shutter_a.valid_close_values.append("x")
shutter_a.set("o")
shutter_a.set("x")
"""
open_bit = Component(EpicsSignal, ":open")
close_bit = Component(EpicsSignal, ":close")
delay_s = 1.2
valid_open_values = ["open",] # lower-case strings ONLY
valid_close_values = ["close",]
busy = Signal(value=False, name="busy")
def open(self):
"""request shutter to open, interactive use"""
self.open_bit.put(1)
def close(self):
"""request shutter to close, interactive use"""
self.close_bit.put(1)
def set(self, value, **kwargs):
"""request the shutter to open or close, BlueSky plan use"""
# ensure numerical additions to lists are now strings
def input_filter(v):
return str(v).lower()
self.valid_open_values = list(map(input_filter, self.valid_open_values))
self.valid_close_values = list(map(input_filter, self.valid_close_values))
if self.busy.value:
raise RuntimeError("shutter is operating")
acceptables = self.valid_open_values + self.valid_close_values
if input_filter(value) not in acceptables:
msg = "value should be one of " + " | ".join(acceptables)
msg += " : received " + str(value)
raise ValueError(msg)
status = DeviceStatus(self)
def move_shutter():
if input_filter(value) in self.valid_open_values:
self.open() # no need to yield inside a thread
elif input_filter(value) in self.valid_close_values:
self.close()
def run_and_delay():
self.busy.put(True)
move_shutter()
# sleep, since we don't *know* when the shutter has moved
time.sleep(self.delay_s)
self.busy.put(False)
status._finished(success=True)
threading.Thread(target=run_and_delay, daemon=True).start()
return status
class ApsPssShutterWithStatus(Device):
"""
APS PSS shutter
* APS PSS shutters have separate bit PVs for open and close
* set either bit, the shutter moves, and the bit resets a short time later
* a separate status PV tells if the shutter is open or closed
(see :func:`ApsPssShutter()` for alternative)
USAGE::
A_shutter = ApsPssShutterWithStatus(
"2bma:A_shutter",
"PA:02BM:STA_A_FES_OPEN_PL",
name="A_shutter")
B_shutter = ApsPssShutterWithStatus(
"2bma:B_shutter",
"PA:02BM:STA_B_SBS_OPEN_PL",
name="B_shutter")
A_shutter.open()
A_shutter.close()
or
%mov A_shutter "open"
%mov A_shutter "close"
or
A_shutter.set("open") # MUST be "open", not "Open"
A_shutter.set("close")
When using the shutter in a plan, be sure to use `yield from`.
def in_a_plan(shutter):
yield from abs_set(shutter, "open", wait=True)
# do something
yield from abs_set(shutter, "close", wait=True)
RE(in_a_plan(A_shutter))
The strings accepted by `set()` are defined in attributes
(`open_str` and `close_str`).
"""
open_bit = Component(EpicsSignal, ":open")
close_bit = Component(EpicsSignal, ":close")
pss_state = FormattedComponent(EpicsSignalRO, "{self.state_pv}")
# strings the user will use
open_str = 'open'
close_str = 'close'
# pss_state PV values from EPICS
open_val = 1
close_val = 0
def __init__(self, prefix, state_pv, *args, **kwargs):
self.state_pv = state_pv
super().__init__(prefix, *args, **kwargs)
def open(self, timeout=10):
" "
ophyd.status.wait(self.set(self.open_str), timeout=timeout)
def close(self, timeout=10):
" "
ophyd.status.wait(self.set(self.close_str), timeout=timeout)
def set(self, value, **kwargs):
# first, validate the input value
acceptables = (self.close_str, self.open_str)
if value not in acceptables:
msg = "value should be one of " + " | ".join(acceptables)
msg += " : received " + str(value)
raise ValueError(msg)
command_signal = {
self.open_str: self.open_bit,
self.close_str: self.close_bit
}[value]
expected_value = {
self.open_str: self.open_val,
self.close_str: self.close_val
}[value]
working_status = DeviceStatus(self)
def shutter_cb(value, timestamp, **kwargs):
# APS shutter state PVs do not define strings, use numbers
#value = enums[int(value)]
value = int(value)
if value == expected_value:
self.pss_state.clear_sub(shutter_cb)
working_status._finished()
self.pss_state.subscribe(shutter_cb)
command_signal.set(1)
return working_status
@property
def isOpen(self):
" "
return self.pss_state.value == self.open_val
@property
def isClosed(self):
" "
return self.pss_state.value == self.close_val
class SimulatedApsPssShutterWithStatus(Device):
"""
Simulated APS PSS shutter
USAGE::
sim = SimulatedApsPssShutterWithStatus(name="sim")
"""
open_bit = Component(Signal)
close_bit = Component(Signal)
pss_state = FormattedComponent(Signal)
# strings the user will use
open_str = 'open'
close_str = 'close'
# pss_state PV values from EPICS
open_val = 1
close_val = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.open_bit.set(0)
self.close_bit.set(0)
self.pss_state.set(self.close_val)
def open(self, timeout=10):
"""request the shutter to open"""
self.set(self.open_str)
def close(self, timeout=10):
"""request the shutter to close"""
self.set(self.close_str)
def get_response_time(self):
"""simulated response time for PSS status"""
# return 0.5
return np.random.uniform(0.1, 0.9)
def set(self, value, **kwargs):
"""set the shutter to "close" or "open" """
# first, validate the input value
acceptables = (self.close_str, self.open_str)
if value not in acceptables:
msg = "value should be one of " + " | ".join(acceptables)
msg += " : received " + str(value)
raise ValueError(msg)
command_signal = {
self.open_str: self.open_bit,
self.close_str: self.close_bit
}[value]
expected_value = {
self.open_str: self.open_val,
self.close_str: self.close_val
}[value]
working_status = DeviceStatus(self)
simulate_delay = self.pss_state.value != expected_value
def shutter_cb(value, timestamp, **kwargs):
self.pss_state.clear_sub(shutter_cb)
if simulate_delay:
time.sleep(self.get_response_time())
self.pss_state.set(expected_value)
working_status._finished()
self.pss_state.subscribe(shutter_cb)
command_signal.put(1)
# finally, make sure both signals are reset
self.open_bit.put(0)
self.close_bit.put(0)
return working_status
@property
def isOpen(self):
"""is the shutter open?"""
if self.pss_state.value is None:
self.pss_state.set(self.close_val)
return self.pss_state.value == self.open_val
@property
def isClosed(self):
"""is the shutter closed?"""
if self.pss_state.value is None:
self.pss_state.set(self.close_val)
return self.pss_state.value == self.close_val
class ApsUndulator(Device):
"""
APS Undulator
USAGE: ``undulator = ApsUndulator("ID09ds:", name="undulator")``
"""
energy = Component(EpicsSignal, "Energy", write_pv="EnergySet")
energy_taper = Component(EpicsSignal, "TaperEnergy", write_pv="TaperEnergySet")
gap = Component(EpicsSignal, "Gap", write_pv="GapSet")
gap_taper = Component(EpicsSignal, "TaperGap", write_pv="TaperGapSet")
start_button = Component(EpicsSignal, "Start")
stop_button = Component(EpicsSignal, "Stop")
harmonic_value = Component(EpicsSignal, "HarmonicValue")
gap_deadband = Component(EpicsSignal, "DeadbandGap")
device_limit = Component(EpicsSignal, "DeviceLimit")
access_mode = Component(EpicsSignalRO, "AccessSecurity")
device_status = Component(EpicsSignalRO, "Busy")
total_power = Component(EpicsSignalRO, "TotalPower")
message1 = Component(EpicsSignalRO, "Message1")
message2 = Component(EpicsSignalRO, "Message2")
message3 = Component(EpicsSignalRO, "Message3")
time_left = Component(EpicsSignalRO, "ShClosedTime")
device = Component(EpicsSignalRO, "Device")
location = Component(EpicsSignalRO, "Location")
version = Component(EpicsSignalRO, "Version")
class ApsUndulatorDual(Device):
"""
APS Undulator with upstream *and* downstream controls
USAGE: ``undulator = ApsUndulatorDual("ID09", name="undulator")``
note:: the trailing ``:`` in the PV prefix should be omitted
"""
upstream = Component(ApsUndulator, "us:")
downstream = Component(ApsUndulator, "ds:")
class ApsBssUserInfoDevice(Device):
"""
provide current experiment info from the APS BSS
BSS: Beamtime Scheduling System
USAGE::
bss_user_info = ApsBssUserInfoDevice(
"9id_bss:",
name="bss_user_info")
sd.baseline.append(bss_user_info)
"""
proposal_number = Component(EpicsSignal, "proposal_number")
activity = Component(EpicsSignal, "activity", string=True)
badge = Component(EpicsSignal, "badge", string=True)
bss_name = Component(EpicsSignal, "bss_name", string=True)
contact = Component(EpicsSignal, "contact", string=True)
email = Component(EpicsSignal, "email", string=True)
institution = Component(EpicsSignal, "institution", string=True)
station = Component(EpicsSignal, "station", string=True)
team_others = Component(EpicsSignal, "team_others", string=True)
time_begin = Component(EpicsSignal, "time_begin", string=True)
time_end = Component(EpicsSignal, "time_end", string=True)
timestamp = Component(EpicsSignal, "timestamp", string=True)
title = Component(EpicsSignal, "title", string=True)
# not yet updated, see: https://git.aps.anl.gov/jemian/aps_bss_user_info/issues/10
esaf = Component(EpicsSignal, "esaf", string=True)
esaf_contact = Component(EpicsSignal, "esaf_contact", string=True)
esaf_team = Component(EpicsSignal, "esaf_team", string=True)
class AxisTunerException(ValueError):
"""Exception during execution of `AxisTunerBase` subclass"""
pass
class AxisTunerMixin(EpicsMotor):
"""
Mixin class to provide tuning capabilities for an axis
USAGE::
class TunableEpicsMotor(AxisTunerMixin, EpicsMotor):
pass
def a2r_pretune_hook():
# set the counting time for *this* tune
yield from bps.abs_set(scaler.preset_time, 0.2)
a2r = TunableEpicsMotor("xxx:m1", name="a2r")
a2r.tuner = TuneAxis([scaler], a2r, signal_name=scaler.channels.chan2.name)
a2r.tuner.width = 0.02
a2r.tuner.num = 21
a2r.pre_tune_method = a2r_pretune_hook
RE(a2r.tune())
# tune four of the USAXS axes (using preconfigured parameters for each)
RE(tune_axes([mr, m2r, ar, a2r])
HOOK METHODS
There are two hook methods (`pre_tune_method()`, and `post_tune_method()`)
for callers to add additional plan parts, such as opening or closing shutters,
setting detector parameters, or other actions.
Each hook method must accept one argument:
axis object such as `EpicsMotor` or `SynAxis`,
such as::
def my_pre_tune_hook(axis):
yield from bps.mv(shutter, "open")
def my_post_tune_hook(axis):
yield from bps.mv(shutter, "close")
class TunableSynAxis(AxisTunerMixin, SynAxis):
pass
myaxis = TunableSynAxis(name="myaxis")
mydet = SynGauss('mydet', myaxis, 'myaxis', center=0.21, Imax=0.98e5, sigma=0.127)
myaxis.tuner = TuneAxis([mydet], myaxis)
myaxis.pre_tune_method = my_pre_tune_hook
myaxis.post_tune_method = my_post_tune_hook
RE(myaxis.tune())
"""
def __init__(self):
self.tuner = None # such as: APS_BlueSky_tools.plans.TuneAxis
# Hook functions for callers to add additional plan parts
# Each must accept one argument: axis object such as `EpicsMotor` or `SynAxis`
self.pre_tune_method = self._default_pre_tune_method
self.post_tune_method = self._default_post_tune_method
def _default_pre_tune_method(self):
"""called before `tune()`"""
print("{} position before tuning: {}".format(self.name, self.position))
def _default_post_tune_method(self):
"""called after `tune()`"""
print("{} position after tuning: {}".format(self.name, self.position))
def tune(self, md=None, **kwargs):
if self.tuner is None:
msg = "Must define an axis tuner, none specified."
msg += " Consider using APS_BlueSky_tools.plans.TuneAxis()"
raise AxisTunerException(msg)
if self.tuner.axis is None:
msg = "Must define an axis, none specified."
raise AxisTunerException(msg)
if md is None:
md = OrderedDict()
md["purpose"] = "tuner"
md["datetime"] = str(datetime.now())
if self.tuner is not None:
if self.pre_tune_method is not None:
self.pre_tune_method()
yield from self.tuner.tune(md=md, **kwargs)
if self.post_tune_method is not None:
self.post_tune_method()
class TunableEpicsMotor(AxisTunerMixin, EpicsMotor):
"""
EPICS motor with signal for tuning
USAGE::
def a2r_pretune_hook():
# set the counting time for *this* tune
yield from bps.abs_set(scaler.preset_time, 0.2)
a2r = TunableEpicsMotor("xxx:m1", name="a2r")
a2r.tuner = TuneAxis([scaler], a2r, signal_name=scaler.channels.chan2.name)
a2r.tuner.width = 0.02
a2r.tuner.num = 21
a2r.pre_tune_method = a2r_pretune_hook
RE(a2r.tune())
"""
__metaclass__ = EpicsMotor
class EpicsMotorDialMixin(object):
"""
add motor record's dial coordinate fields
USAGE::
class myEpicsMotor(EpicsMotor, EpicsMotorDialMixin): pass
m1 = myEpicsMotor('xxx:m1', name='m1')
"""
dial = Component(EpicsSignal, ".DRBV", write_pv=".DVAL")
class EpicsMotorWithDial(EpicsMotor, EpicsMotorDialMixin):
"""
add motor record's dial coordinates to EpicsMotor
USAGE::
m1 = EpicsMotorWithDial('xxx:m1', name='m1')
This is legacy support. For new work, use `EpicsMotorDialMixin`.
"""
pass
class EpicsMotorLimitsMixin(Device):
"""
add motor record HLM & LLM fields & compatibility get_lim() and set_lim()
"""
soft_limit_lo = Component(EpicsSignal, ".LLM")
soft_limit_hi = Component(EpicsSignal, ".HLM")
def get_lim(self, flag):
"""
Returns the user limit of motor
flag > 0: returns high limit
flag < 0: returns low limit
flag == 0: returns None
"""
if flag > 0:
return self.high_limit
else:
return self.low_limit
def set_lim(self, low, high):
"""
Sets the low and high limits of motor
* Low limit is set to lesser of (low, high)
* High limit is set to greater of (low, high)
* No action taken if motor is moving.
"""
if not self.moving:
self.soft_limit_lo.put(min(low, high))
self.soft_limit_hi.put(max(low, high))
class EpicsMotorServoMixin(object):
"""
add motor record's servo loop controls
USAGE::
class myEpicsMotor(EpicsMotor, EpicsMotorServoMixin): pass
m1 = myEpicsMotor('xxx:m1', name='m1')
"""
# values: "Enable" or "Disable"
servo = Component(EpicsSignal, ".CNEN", string=True)
class EpicsMotorWithServo(EpicsMotor, EpicsMotorServoMixin):
"""
extend basic motor support to enable/disable the servo loop controls
USAGE::
m1 = EpicsMotorWithDial('xxx:m1', name='m1')
This is legacy support. For new work, use `EpicsMotorServoMixin`.
"""
pass
class EpicsMotorRawMixin(object):
"""
add motor record's raw coordinate fields
USAGE::
class myEpicsMotor(EpicsMotor, EpicsMotorRawMixin): pass
m1 = myEpicsMotor('xxx:m1', name='m1')
"""
raw = Component(EpicsSignal, ".RRBV", write_pv=".RVAL")
class EpicsMotorDescriptionMixin(object):
"""
add motor record's description field
USAGE::
class myEpicsMotor(EpicsMotor, EpicsMotorDescriptionMixin): pass
m1 = myEpicsMotor('xxx:m1', name='m1')
"""
desc = Component(EpicsSignal, ".DESC")
class EpicsMotorShutter(Device):
"""
a shutter, implemented with an EPICS motor moved between two positions
USAGE::
tomo_shutter = EpicsMotorShutter("2bma:m23", name="tomo_shutter")
tomo_shutter.closed_position = 1.0 # default
tomo_shutter.open_position = 0.0 # default
tomo_shutter.open()
tomo_shutter.close()
# or, when used in a plan
def planA():
yield from abs_set(tomo_shutter, "open", group="O")
yield from wait("O")
yield from abs_set(tomo_shutter, "close", group="X")
yield from wait("X")
def planA():
yield from abs_set(tomo_shutter, "open", wait=True)
yield from abs_set(tomo_shutter, "close", wait=True)
def planA():
yield from mv(tomo_shutter, "open")
yield from mv(tomo_shutter, "close")
"""
motor = Component(EpicsMotor, "")
closed_position = 1.0
open_position = 0.0
_tolerance = 0.01
@property
def isOpen(self):
" "
return abs(self.motor.position - self.open_position) <= self._tolerance
@property
def isClosed(self):
" "
return abs(self.motor.position - self.closed_position) <= self._tolerance
def open(self):
"""move motor to BEAM NOT BLOCKED position, interactive use"""
self.motor.move(self.open_position)
def close(self):
"""move motor to BEAM BLOCKED position, interactive use"""
self.motor.move(self.closed_position)
def set(self, value, *, timeout=None, settle_time=None):
"""
`set()` is like `put()`, but used in BlueSky plans
PARAMETERS
value : "open" or "close"
timeout : float, optional
Maximum time to wait. Note that set_and_wait does not support
an infinite timeout.
settle_time: float, optional
Delay after the set() has completed to indicate completion
to the caller
RETURNS
status : DeviceStatus
"""
# using put completion:
# timeout and settle time is handled by the status object.
status = DeviceStatus(
self, timeout=timeout, settle_time=settle_time)
def put_callback(**kwargs):
status._finished(success=True)
if value.lower() == "open":
pos = self.open_position
elif value.lower() == "close":
pos = self.closed_position
else:
msg = "value should be either open or close"
msg + " : received " + str(value)
raise ValueError(msg)
self.motor.user_setpoint.put(
pos, use_complete=True, callback=put_callback)
return status
class EpicsOnOffShutter(Device):
"""
a shutter, implemented with an EPICS PV moved between two positions
Use for a shutter controlled by a single PV which takes a
value for the close command and a different value for the open command.
The current position is determined by comparing the value of the control
with the expected open and close values.
USAGE::
bit_shutter = EpicsOnOffShutter("2bma:bit1", name="bit_shutter")
bit_shutter.closed_position = 0 # default
bit_shutter.open_position = 1 # default
bit_shutter.open()
bit_shutter.close()
# or, when used in a plan
def planA():
yield from mv(bit_shutter, "open")
yield from mv(bit_shutter, "close")
"""
control = Component(EpicsSignal, "")
closed_position = 0
open_position = 1
@property
def isOpen(self):
" "
return self.control.value == self.open_position
@property
def isClosed(self):
" "
return self.control.value == self.closed_position
def open(self):
"""move control to BEAM NOT BLOCKED position, interactive use"""
self.control.put(self.open_position)
def close(self):
"""move control to BEAM BLOCKED position, interactive use"""
self.control.put(self.closed_position)
def set(self, value, *, timeout=None, settle_time=None):
"""
`set()` is like `put()`, but used in BlueSky plans
PARAMETERS
value : "open" or "close"
timeout : float, optional
Maximum time to wait. Note that set_and_wait does not support
an infinite timeout.
settle_time: float, optional
Delay after the set() has completed to indicate completion
to the caller
RETURNS
status : DeviceStatus
"""
# using put completion:
# timeout and settle time is handled by the status object.
status = DeviceStatus(
self, timeout=timeout, settle_time=settle_time)
def put_callback(**kwargs):
status._finished(success=True)
if value.lower() == "open":
pos = self.open_position
elif value.lower() == "close":
pos = self.closed_position
else:
msg = "value should be either open or close"
msg + " : received " + str(value)
raise ValueError(msg)
self.control.put(
pos, use_complete=True, callback=put_callback)
return status
class DualPf4FilterBox(Device):
"""
Dual Xia PF4 filter boxes using support from synApps (using Al, Ti foils)
Example::
pf4 = DualPf4FilterBox("2bmb:pf4:", name="pf4")
pf4_AlTi = DualPf4FilterBox("9idcRIO:pf4:", name="pf4_AlTi")
"""
fPosA = Component(EpicsSignal, "fPosA")
fPosB = Component(EpicsSignal, "fPosB")
bankA = Component(EpicsSignalRO, "bankA")
bankB = Component(EpicsSignalRO, "bankB")
bitFlagA = Component(EpicsSignalRO, "bitFlagA")
bitFlagB = Component(EpicsSignalRO, "bitFlagB")
transmission = Component(EpicsSignalRO, "trans")
transmission_a = Component(EpicsSignalRO, "transA")
transmission_b = Component(EpicsSignalRO, "transB")
inverse_transmission = Component(EpicsSignalRO, "invTrans")
thickness_Al_mm = Component(EpicsSignalRO, "filterAl")
thickness_Ti_mm = Component(EpicsSignalRO, "filterTi")
thickness_glass_mm = Component(EpicsSignalRO, "filterGlass")
energy_keV_local = Component(EpicsSignal, "E:local")
energy_keV_mono = Component(EpicsSignal, "displayEnergy")
mode = Component(EpicsSignal, "useMono", string=True)
class ProcedureRegistry(Device):
"""
Procedure Registry: run a blocking function in a thread
With many instruments, such as USAXS, there are several operating
modes to be used, each with its own setup code. This ophyd Device
should coordinate those modes so that the setup procedures can be called
either as part of a Bluesky plan or from the command line directly.
Assumes that users will write functions to setup a particular
operation or operating mode. The user-written functions may not
be appropriate to use in a plan directly since they might
make blocking calls. The ProcedureRegistry will call the function
in a thread (which is allowed to make blocking calls) and wait
for the thread to complete.
It is assumed that each user-written function will not return until
it is complete.
.. autosummary::
~dir
~add
~remove
~set
~put
EXAMPLE:
Given these function definitions::
def clearScalerNames():
for ch in scaler.channels.configuration_attrs:
if ch.find(".") < 0:
chan = scaler.channels.__getattribute__(ch)
chan.chname.put("")
def setMyScalerNames():
scaler.channels.chan01.chname.put("clock")
scaler.channels.chan02.chname.put("I0")
scaler.channels.chan03.chname.put("detector")
create a registry and add the two functions (default name
is the function name):
use_mode = ProcedureRegistry(name="ProcedureRegistry")
use_mode.add(clearScalerNames)
use_mode.add(setMyScalerNames)
and then use this registry in a plan, such as this::
def myPlan():
yield from bps.mv(use_mode, "setMyScalerNames")
yield from bps.sleep(5)
yield from bps.mv(use_mode, "clearScalerNames")
"""
busy = Component(Signal, value=False)
registry = {}
delay_s = 0
timeout_s = None
state = "__created__"
@property
def dir(self):
"""tuple of procedure names"""
return tuple(sorted(self.registry.keys()))
def add(self, procedure, proc_name=None):
"""
add procedure to registry
"""
#if procedure.__class__ == "function":
nm = proc_name or procedure.__name__
self.registry[nm] = procedure
def remove(self, procedure):
"""
remove procedure from registry
"""
if isinstance(procedure, str):
nm = procedure
else:
nm = procedure.__name__
if nm in self.registry:
del self.registry[nm]
def set(self, proc_name):
"""
run procedure in a thread, return once it is complete
proc_name (str) : name of registered procedure to be run
"""
if not isinstance(proc_name, str):
raise ValueError("expected a procedure name, not {}".format(proc_name))
if proc_name not in self.registry:
raise KeyError("unknown procedure name: "+proc_name)
if self.busy.value:
raise RuntimeError("busy now")
self.state = "__busy__"
status = DeviceStatus(self)
@APS_plans.run_in_thread
def run_and_delay():
self.busy.put(True)
self.registry[proc_name]()
# optional delay
if self.delay_s > 0:
time.sleep(self.delay_s)
self.busy.put(False)
status._finished(success=True)
run_and_delay()
ophyd.status.wait(status, timeout=self.timeout_s)
self.state = proc_name
return status
def put(self, value): # TODO: risky?
"""replaces ophyd Device default put() behavior"""
self.set(value)
# AreaDetector support
AD_FrameType_schemes = {
"reset" : dict( # default names from Area Detector code
ZRST = "Normal",
ONST = "Background",
TWST = "FlatField",
),
"NeXus" : dict( # NeXus (typical locations)
ZRST = "/entry/data/data",
ONST = "/entry/data/dark",
TWST = "/entry/data/white",
),
"DataExchange" : dict( # APS Data Exchange
ZRST = "/exchange/data",
ONST = "/exchange/data_dark",
TWST = "/exchange/data_white",
),
}
def AD_setup_FrameType(prefix, scheme="NeXus"):
"""
configure so frames are identified & handled by type (dark, white, or image)
PARAMETERS
prefix (str) : EPICS PV prefix of area detector, such as "13SIM1:"
scheme (str) : any key in the `AD_FrameType_schemes` dictionary
This routine prepares the EPICS Area Detector to identify frames
by image type for handling by clients, such as the HDF5 file writing plugin.
With the HDF5 plugin, the `FrameType` PV is added to the NDattributes
and then used in the layout file to direct the acquired frame to
the chosen dataset. The `FrameType` PV value provides the HDF5 address
to be used.
To use a different scheme than the defaults, add a new key to
the `AD_FrameType_schemes` dictionary, defining storage values for the
fields of the EPICS `mbbo` record that you will be using.
see: https://github.com/BCDA-APS/use_bluesky/blob/master/notebooks/images_darks_flats.ipynb
EXAMPLE::
AD_setup_FrameType("2bmbPG3:", scheme="DataExchange")
* Call this function *before* creating the ophyd area detector object
* use lower-level PyEpics interface
"""
db = AD_FrameType_schemes.get(scheme)
if db is None:
msg = "unknown AD_FrameType_schemes scheme: {}".format(scheme)
msg += "\n Should be one of: " + ", ".join(AD_FrameType_schemes.keys())
raise ValueError(msg)
template = "{}cam1:FrameType{}.{}"
for field, value in db.items():
epics.caput(template.format(prefix, "", field), value)
epics.caput(template.format(prefix, "_RBV", field), value)
def AD_warmed_up(detector):
"""
Has area detector pushed an NDarray to the HDF5 plugin? True or False
Works around an observed issue: #598
https://github.com/NSLS-II/ophyd/issues/598#issuecomment-414311372
If detector IOC has just been started and has not yet taken an image
with the HDF5 plugin, then a TimeoutError will occur as the
HDF5 plugin "Capture" is set to 1 (Start). In such case,
first acquire at least one image with the HDF5 plugin enabled.
"""
old_capture = detector.hdf1.capture.value
old_file_write_mode = detector.hdf1.file_write_mode.value
if old_capture == 1:
return True
detector.hdf1.file_write_mode.put(1)
detector.hdf1.capture.put(1)
verdict = detector.hdf1.capture.get() == 1
detector.hdf1.capture.put(old_capture)
detector.hdf1.file_write_mode.put(old_file_write_mode)
return verdict
class ApsFileStoreHDF5(FileStorePluginBase):
"""
custom class to define image file name from EPICS
To allow users to control the file name,
we override the ``make_filename()`` method here
and we need to override some intervening classes.
To allow users to control the file number,
we override the ``stage()`` method here
and triple-comment out that line, and bring in
sections from the methods we are replacing here.
The image file name is set in `FileStoreBase.make_filename()`
from `ophyd.areadetector.filestore_mixins`. This is called
(during device staging) from `FileStoreBase.stage()`
To use this custom class, we need to connect it to some
intervening structure:
==================================== ============================
custom class superclass(es)
==================================== ============================
``ApsFileStoreHDF5`` ``FileStorePluginBase``
``ApsFileStoreHDF5IterativeWrite`` ``ApsFileStoreHDF5``, `FileStoreIterativeWrite``
``ApsHDF5Plugin`` ``HDF5Plugin``, `ApsFileStoreHDF5IterativeWrite``
==================================== ============================
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.filestore_spec = 'AD_HDF5' # spec name stored in resource doc
self.stage_sigs.update([
('file_template', '%s%s_%4.4d.h5'),
('file_write_mode', 'Stream'),
('capture', 1)
])
def make_filename(self):
"""
overrides default behavior: Get info from EPICS HDF5 plugin.
"""
# start of the file name, file number will be appended per template
filename = self.file_name.value
# this is where the HDF5 plugin will write the image,
# relative to the IOC's filesystem
write_path = self.file_path.value
# this is where the DataBroker will find the image,
# on a filesystem accessible to BlueSky
read_path = write_path
return filename, read_path, write_path
def generate_datum(self, key, timestamp, datum_kwargs):
"""Generate a uid and cache it with its key for later insertion."""
template = self.file_template.get()
filename, read_path, write_path = self.make_filename()
file_number = self.file_number.get()
hdf5_file_name = template % (read_path, filename, file_number)
# inject the actual name of the HDF5 file here into datum_kwargs
datum_kwargs["HDF5_file_name"] = hdf5_file_name
# print("make_filename:", hdf5_file_name)
return super().generate_datum(key, timestamp, datum_kwargs)
def get_frames_per_point(self):
""" """
return self.num_capture.get()
def stage(self):
""" """
# Make a filename.
filename, read_path, write_path = self.make_filename()
# Ensure we do not have an old file open.
set_and_wait(self.capture, 0)
# These must be set before parent is staged (specifically
# before capture mode is turned on. They will not be reset
# on 'unstage' anyway.
set_and_wait(self.file_path, write_path)
set_and_wait(self.file_name, filename)
### set_and_wait(self.file_number, 0)
# get file number now since it is incremented during stage()
file_number = self.file_number.get()
# Must avoid parent's stage() since it sets file_number to 0
# Want to call grandparent's stage()
#super().stage() # avoid this - sets `file_number` to zero
# call grandparent.stage()
FileStoreBase.stage(self)
# AD does the file name templating in C
# We can't access that result until after acquisition
# so we apply the same template here in Python.
template = self.file_template.get()
self._fn = template % (read_path, filename, file_number)
self._fp = read_path
if not self.file_path_exists.get():
raise IOError("Path {} does not exist on IOC.".format(
self.file_path.get()))
# from FileStoreIterativeWrite.stage()
self._point_counter = itertools.count()
# from FileStoreHDF5.stage()
res_kwargs = {'frame_per_point': self.get_frames_per_point()}
self._generate_resource(res_kwargs)
class ApsFileStoreHDF5IterativeWrite(ApsFileStoreHDF5, FileStoreIterativeWrite):
"""custom class to enable users to control image file name"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
FileStoreIterativeWrite.__init__(self, *args, **kwargs)
class ApsHDF5Plugin(HDF5Plugin, ApsFileStoreHDF5IterativeWrite):
"""
custom class to take image file names from EPICS
NOTE: replaces standard Bluesky algorithm where file names
are defined as UUID strings, virtually guaranteeing that
no existing images files will ever be overwritten.
*Caveat emptor* applies here. You assume some expertise!
USAGE::
class MySimDetector(SingleTrigger, SimDetector):
'''SimDetector with HDF5 file names specified by EPICS'''
cam = ADComponent(MyAltaCam, "cam1:")
image = ADComponent(ImagePlugin, "image1:")
hdf1 = ADComponent(
ApsHDF5Plugin,
suffix = "HDF1:",
root = "/",
write_path_template = "/local/data",
)
simdet = MySimDetector("13SIM1:", name="simdet")
# remove this so array counter is not set to zero each staging
del simdet.hdf1.stage_sigs["array_counter"]
simdet.hdf1.stage_sigs["file_template"] = '%s%s_%3.3d.h5'
simdet.hdf1.file_path.put("/local/data/demo/")
simdet.hdf1.file_name.put("test")
simdet.hdf1.array_counter.put(0)
RE(bp.count([simdet]))
""" | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/APS_BlueSky_tools/legacy_code___devices.py | legacy_code___devices.py |
from collections import OrderedDict
from ophyd.device import (
Device,
Component as Cpt,
DynamicDeviceComponent as DDC,
FormattedComponent as FC)
from ophyd import EpicsSignal, EpicsSignalRO
from ophyd.status import DeviceStatus
__all__ = """
sscanRecord
sscanDevice
""".split()
class sscanPositioner(Device):
"""positioner of an EPICS sscan record"""
readback_pv = FC(EpicsSignal, '{self.prefix}.R{self._ch_num}PV')
readback_value = FC(EpicsSignalRO, '{self.prefix}.R{self._ch_num}CV')
setpoint_pv = FC(EpicsSignal, '{self.prefix}.P{self._ch_num}PV')
setpoint_value = FC(EpicsSignalRO, '{self.prefix}.P{self._ch_num}DV')
start = FC(EpicsSignal, '{self.prefix}.P{self._ch_num}SP')
center = FC(EpicsSignal, '{self.prefix}.P{self._ch_num}CP')
end = FC(EpicsSignal, '{self.prefix}.P{self._ch_num}EP')
step_size = FC(EpicsSignal, '{self.prefix}.P{self._ch_num}SI')
width = FC(EpicsSignal, '{self.prefix}.P{self._ch_num}WD')
abs_rel = FC(EpicsSignal, '{self.prefix}.P{self._ch_num}AR')
mode = FC(EpicsSignal, '{self.prefix}.P{self._ch_num}SM')
units = FC(EpicsSignalRO, '{self.prefix}.P{self._ch_num}EU')
def __init__(self, prefix, num, **kwargs):
self._ch_num = num
super().__init__(prefix, **kwargs)
def reset(self):
"""set all fields to default values"""
self.readback_pv.put("")
self.setpoint_pv.put("")
self.start.put(0)
self.center.put(0)
self.end.put(0)
self.step_size.put(0)
self.width.put(0)
self.abs_rel.put("ABSOLUTE")
self.mode.put("LINEAR")
class sscanDetector(Device):
"""detector of an EPICS sscan record"""
input_pv = FC(EpicsSignal, '{self.prefix}.D{self._ch_num}PV')
current_value = FC(EpicsSignal, '{self.prefix}.D{self._ch_num}CV')
def __init__(self, prefix, num, **kwargs):
self._ch_num = num
super().__init__(prefix, **kwargs)
def reset(self):
"""set all fields to default values"""
self.input_pv.put("")
class sscanTrigger(Device):
"""detector trigger of an EPICS sscan record"""
trigger_pv = FC(EpicsSignal, '{self.prefix}.T{self._ch_num}PV')
trigger_value = FC(EpicsSignal, '{self.prefix}.T{self._ch_num}CD')
def __init__(self, prefix, num, **kwargs):
self._ch_num = num
super().__init__(prefix, **kwargs)
def reset(self):
"""set all fields to default values"""
self.trigger_pv.put("")
self.trigger_value.put(1)
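# Example (illustrative; "xxx:scan1" is a hypothetical PV prefix): positioner
# channel 1 of that record resolves its FormattedComponent templates to:
#     xxx:scan1.P1PV  (setpoint_pv)    xxx:scan1.R1PV  (readback_pv)
#     xxx:scan1.P1SP  (start)          xxx:scan1.P1CP  (center)
#     xxx:scan1.P1EP  (end)            xxx:scan1.P1SI  (step_size)
# Detector 01 and trigger 1 resolve similarly, e.g. xxx:scan1.D01PV and xxx:scan1.T1PV.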
def _sscan_positioners(channel_list):
defn = OrderedDict()
for chan in channel_list:
attr = 'p{}'.format(chan)
defn[attr] = (sscanPositioner, '', {'num': chan})
return defn
def _sscan_detectors(channel_list):
defn = OrderedDict()
for chan in channel_list:
attr = 'd{}'.format(chan)
defn[attr] = (sscanDetector, '', {'num': chan})
return defn
def _sscan_triggers(channel_list):
defn = OrderedDict()
for chan in channel_list:
attr = 't{}'.format(chan)
defn[attr] = (sscanTrigger, '', {'num': chan})
return defn
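# Example (sketch): each helper builds the definition dict consumed by ophyd's
# DynamicDeviceComponent (DDC), mapping attribute names to
# (device class, PV suffix, kwargs).  For instance:
#
#     _sscan_positioners(["1", "2"])
#     # -> OrderedDict([('p1', (sscanPositioner, '', {'num': '1'})),
#     #                 ('p2', (sscanPositioner, '', {'num': '2'}))])
#
# so ``sscanRecord(...).positioners.p1`` is an sscanPositioner sub-device.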
class sscanRecord(Device):
"""EPICS synApps sscan record: used as $(P):scan(N)"""
desc = Cpt(EpicsSignal, '.DESC')
faze = Cpt(EpicsSignalRO, '.FAZE')
data_state = Cpt(EpicsSignalRO, '.DSTATE')
npts = Cpt(EpicsSignal, '.NPTS')
cpt = Cpt(EpicsSignalRO, '.CPT')
pasm = Cpt(EpicsSignal, '.PASM')
exsc = Cpt(EpicsSignal, '.EXSC')
bspv = Cpt(EpicsSignal, '.BSPV')
bscd = Cpt(EpicsSignal, '.BSCD')
bswait = Cpt(EpicsSignal, '.BSWAIT')
cmnd = Cpt(EpicsSignal, '.CMND')
ddly = Cpt(EpicsSignal, '.DDLY')
pdly = Cpt(EpicsSignal, '.PDLY')
refd = Cpt(EpicsSignal, '.REFD')
wait = Cpt(EpicsSignal, '.WAIT')
wcnt = Cpt(EpicsSignalRO, '.WCNT')
awct = Cpt(EpicsSignal, '.AWCT')
acqt = Cpt(EpicsSignal, '.ACQT')
acqm = Cpt(EpicsSignal, '.ACQM')
atime = Cpt(EpicsSignal, '.ATIME')
copyto = Cpt(EpicsSignal, '.COPYTO')
a1pv = Cpt(EpicsSignal, '.A1PV')
a1cd = Cpt(EpicsSignal, '.A1CD')
aspv = Cpt(EpicsSignal, '.ASPV')
ascd = Cpt(EpicsSignal, '.ASCD')
positioners = DDC(
_sscan_positioners(
"1 2 3 4".split()
)
)
detectors = DDC(
_sscan_detectors(
["%02d" % k for k in range(1,71)]
)
)
triggers = DDC(
_sscan_triggers(
"1 2 3 4".split()
)
)
    def set(self, value, **kwargs):
        """Start the sscan record (when set to 1); interface used by ``bps.mv()``"""
        if value != 1:
            return
        working_status = DeviceStatus(self)
        started = False
        def exsc_cb(value, timestamp, **kwargs):
            # EXSC drops back to 0 when the scan completes; only mark the status
            # object as finished after the scan has actually been started
            value = int(value)
            if started and value == 0:
                working_status._finished()
        self.exsc.subscribe(exsc_cb)
        self.exsc.set(1)
        started = True
        return working_status
def reset(self):
"""set all fields to default values"""
self.desc.put(self.desc.pvname.split(".")[0])
self.npts.put(1000)
for part in (self.positioners, self.detectors, self.triggers):
for ch_name in part.read_attrs:
channel = part.__getattr__(ch_name)
channel.reset()
self.a1pv.put("")
self.acqm.put("NORMAL")
if self.name.find("scanH") > 0:
self.acqt.put("1D ARRAY")
else:
self.acqt.put("SCALAR")
self.aspv.put("")
self.bspv.put("")
self.pasm.put("STAY")
self.bswait.put("Wait")
self.a1cd.put(1)
self.ascd.put(1)
self.bscd.put(1)
self.refd.put(1)
self.atime.put(0)
self.awct.put(0)
self.copyto.put(0)
self.ddly.put(0)
self.pdly.put(0)
while self.wcnt.get() > 0:
self.wait.put(0)
class sscanDevice(Device):
"""synApps XXX IOC setup of sscan records: $(P):scan$(N)"""
scan_dimension = Cpt(EpicsSignalRO, 'ScanDim')
scan_pause = Cpt(EpicsSignal, 'scanPause')
abort_scans = Cpt(EpicsSignal, 'AbortScans')
scan1 = Cpt(sscanRecord, 'scan1')
scan2 = Cpt(sscanRecord, 'scan2')
scan3 = Cpt(sscanRecord, 'scan3')
scan4 = Cpt(sscanRecord, 'scan4')
scanH = Cpt(sscanRecord, 'scanH')
resume_delay = Cpt(EpicsSignal, 'scanResumeSEQ.DLY1')
def reset(self):
"""set all fields to default values"""
self.scan1.reset()
self.scan2.reset()
self.scan3.reset()
self.scan4.reset()
self.scanH.reset() | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/APS_BlueSky_tools/synApps_ophyd/sscan.py | sscan.py |
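# Example usage (illustrative; the "xxx:" IOC prefix and the plan below are
# hypothetical and depend on the local beamline configuration):
#
#     from bluesky import plan_stubs as bps
#
#     scans = sscanDevice("xxx:", name="scans")
#     scans.reset()                       # restore default field values
#     # start scan1 and wait for EXSC to drop back to zero (scan complete)
#     yield from bps.mv(scans.scan1, 1)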
from collections import OrderedDict
from ophyd.device import (
Device,
Component as Cpt,
DynamicDeviceComponent as DDC,
FormattedComponent as FC)
from ophyd import EpicsSignal, EpicsSignalRO, EpicsMotor
__all__ = """
swaitRecord
userCalcsDevice
swait_setup_random_number
swait_setup_gaussian
swait_setup_lorentzian
swait_setup_incrementer
""".split()
class swaitRecordChannel(Device):
"""channel of a synApps swait record: A-L"""
value = FC(EpicsSignal, '{self.prefix}.{self._ch_letter}')
input_pv = FC(EpicsSignal, '{self.prefix}.IN{self._ch_letter}N')
input_trigger = FC(EpicsSignal, '{self.prefix}.IN{self._ch_letter}P')
hints = {'fields': ['value',]}
read_attrs = ['value', ]
def __init__(self, prefix, letter, **kwargs):
self._ch_letter = letter
super().__init__(prefix, **kwargs)
def reset(self):
"""set all fields to default values"""
self.value.put(0)
self.input_pv.put("")
self.input_trigger.put("Yes")
def _swait_channels(channel_list):
    """Build a DynamicDeviceComponent definition for the lettered swait channels."""
defn = OrderedDict()
for chan in channel_list:
defn[chan] = (swaitRecordChannel, '', {'letter': chan})
return defn
class swaitRecord(Device):
"""synApps swait record: used as $(P):userCalc$(N)"""
desc = Cpt(EpicsSignal, '.DESC')
scan = Cpt(EpicsSignal, '.SCAN')
calc = Cpt(EpicsSignal, '.CALC')
val = Cpt(EpicsSignalRO, '.VAL')
prec = Cpt(EpicsSignal, '.PREC')
oevt = Cpt(EpicsSignal, '.OEVT')
outn = Cpt(EpicsSignal, '.OUTN')
odly = Cpt(EpicsSignal, '.ODLY')
doln = Cpt(EpicsSignal, '.DOLN')
dold = Cpt(EpicsSignal, '.DOLD')
dopt = Cpt(EpicsSignal, '.DOPT')
oopt = Cpt(EpicsSignal, '.OOPT')
flnk = Cpt(EpicsSignal, '.FLNK')
hints = {'fields': ["channels.%s" % c for c in "A B C D E F G H I J K L".split()]}
read_attrs = ["channels.%s" % c for c in "A B C D E F G H I J K L".split()]
channels = DDC(
_swait_channels(
"A B C D E F G H I J K L".split()
)
)
def reset(self):
"""set all fields to default values"""
self.desc.put(self.desc.pvname.split(".")[0])
self.scan.put("Passive")
self.calc.put("0")
self.prec.put("5")
self.dold.put(0)
self.doln.put("")
self.dopt.put("Use VAL")
self.flnk.put("0")
self.odly.put(0)
self.oopt.put("Every Time")
self.outn.put("")
for letter in self.channels.read_attrs:
channel = self.channels.__getattr__(letter)
if isinstance(channel, swaitRecordChannel):
channel.reset()
self.hints = {'fields': ["channels.%s" % c for c in "A B C D E F G H I J K L".split()]}
self.read_attrs = ["channels.%s" % c for c in "A B C D E F G H I J K L".split()]
self.read_attrs.append('val')
class userCalcsDevice(Device):
"""synApps XXX IOC setup of userCalcs: $(P):userCalc$(N)"""
enable = Cpt(EpicsSignal, 'userCalcEnable')
calc1 = Cpt(swaitRecord, 'userCalc1')
calc2 = Cpt(swaitRecord, 'userCalc2')
calc3 = Cpt(swaitRecord, 'userCalc3')
calc4 = Cpt(swaitRecord, 'userCalc4')
calc5 = Cpt(swaitRecord, 'userCalc5')
calc6 = Cpt(swaitRecord, 'userCalc6')
calc7 = Cpt(swaitRecord, 'userCalc7')
calc8 = Cpt(swaitRecord, 'userCalc8')
calc9 = Cpt(swaitRecord, 'userCalc9')
calc10 = Cpt(swaitRecord, 'userCalc10')
def reset(self):
"""set all fields to default values"""
self.calc1.reset()
self.calc2.reset()
self.calc3.reset()
self.calc4.reset()
self.calc5.reset()
self.calc6.reset()
self.calc7.reset()
self.calc8.reset()
self.calc9.reset()
self.calc10.reset()
self.read_attrs = ["calc%d" % (c+1) for c in range(10)]
def swait_setup_random_number(swait, **kw):
"""setup swait record to generate random numbers"""
swait.reset()
swait.scan.put("Passive")
swait.calc.put("RNDM")
swait.scan.put(".1 second")
swait.desc.put("uniform random numbers")
swait.hints = {"fields": ['val',]}
swait.read_attrs = ['val',]
def swait_setup_gaussian(swait, motor, center=0, width=1, scale=1, noise=0.05):
"""setup swait for noisy Gaussian"""
# consider a noisy background, as well (needs a couple calcs)
assert(isinstance(motor, EpicsMotor))
assert(width > 0)
assert(0.0 <= noise <= 1.0)
swait.reset()
swait.scan.put("Passive")
swait.channels.A.input_pv.put(motor.user_readback.pvname)
swait.channels.B.value.put(center)
swait.channels.C.value.put(width)
swait.channels.D.value.put(scale)
swait.channels.E.value.put(noise)
swait.calc.put("D*(0.95+E*RNDM)/exp(((A-b)/c)^2)")
swait.scan.put("I/O Intr")
swait.desc.put("noisy Gaussian curve")
swait.hints = {"fields": ['val',]}
swait.read_attrs = ['val',]
def swait_setup_lorentzian(swait, motor, center=0, width=1, scale=1, noise=0.05):
"""setup swait record for noisy Lorentzian"""
# consider a noisy background, as well (needs a couple calcs)
assert(isinstance(motor, EpicsMotor))
assert(width > 0)
assert(0.0 <= noise <= 1.0)
swait.reset()
swait.scan.put("Passive")
swait.channels.A.input_pv.put(motor.user_readback.pvname)
swait.channels.B.value.put(center)
swait.channels.C.value.put(width)
swait.channels.D.value.put(scale)
swait.channels.E.value.put(noise)
swait.calc.put("D*(0.95+E*RNDM)/(1+((A-b)/c)^2)")
swait.scan.put("I/O Intr")
swait.desc.put("noisy Lorentzian curve")
swait.hints = {"fields": ['val',]}
swait.read_attrs = ['val',]
def swait_setup_incrementer(swait, scan=None, limit=100000):
"""setup swait record as an incrementer"""
scan = scan or ".1 second"
swait.reset()
swait.scan.put("Passive")
pvname = swait.val.pvname.split(".")[0]
swait.channels.A.input_pv.put(pvname)
swait.channels.B.value.put(limit)
swait.calc.put("(A+1) % B")
swait.scan.put(scan)
swait.desc.put("incrementer")
swait.hints = {"fields": ['val',]}
swait.read_attrs = ['val',] | APS-BlueSky-tools | /APS_BlueSky_tools-2019.103.0.tar.gz/APS_BlueSky_tools-2019.103.0/APS_BlueSky_tools/synApps_ophyd/swait.py | swait.py |
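# Example usage (illustrative; the "xxx:" IOC prefix and the motor PV are hypothetical):
#
#     from ophyd import EpicsMotor
#
#     calcs = userCalcsDevice("xxx:", name="calcs")
#     motor = EpicsMotor("xxx:m1", name="motor")
#     # make userCalc1 simulate a noisy detector peak that follows the motor position
#     swait_setup_gaussian(calcs.calc1, motor, center=2, width=0.015, scale=100000)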
.. image:: https://github.com/agronholm/apscheduler/actions/workflows/test.yml/badge.svg
:target: https://github.com/agronholm/apscheduler/actions/workflows/test.yml
:alt: Build Status
.. image:: https://coveralls.io/repos/github/agronholm/apscheduler/badge.svg?branch=master
:target: https://coveralls.io/github/agronholm/apscheduler?branch=master
:alt: Code Coverage
.. image:: https://readthedocs.org/projects/apscheduler/badge/?version=latest
:target: https://apscheduler.readthedocs.io/en/master/?badge=latest
:alt: Documentation
.. warning:: The v4.0 series is provided as a **pre-release** and may change in a
backwards incompatible fashion without any migration pathway, so do NOT use this
release in production!
Advanced Python Scheduler (APScheduler) is a task scheduler and task queue system for
Python. It can be used solely as a job queuing system if you have no need for task
scheduling. It scales both up and down, and is suitable for anything from trivial,
single-process use cases to large deployments spanning multiple nodes. Multiple schedulers and
workers can be deployed to use a shared data store to provide both a degree of high
availability and horizontal scaling.
APScheduler comes in both synchronous and asynchronous flavors, making it a good fit for
both traditional, thread-based applications and asynchronous (asyncio or Trio_)
applications. Documentation and examples are provided for integrating with either WSGI_
or ASGI_ compatible web applications.
Support is provided for persistent storage of schedules and jobs. This means that they
can be shared among multiple scheduler/worker instances and will survive process and
node restarts.
The built-in persistent data store back-ends are:
* PostgreSQL
* MySQL and derivatives
* SQLite
* MongoDB
The built-in event brokers (needed in scenarios with multiple schedulers and/or
workers):
* PostgreSQL
* Redis
* MQTT
The built-in scheduling mechanisms (*triggers*) are:
* Cron-style scheduling
* Interval-based scheduling (runs tasks on even intervals)
* Calendar-based scheduling (runs tasks on intervals of X years/months/weeks/days,
always at the same time of day)
* One-off scheduling (runs a task once, at a specific date/time)
Different scheduling mechanisms can even be combined with so-called *combining triggers*
(see the documentation_ for details).
You can also implement custom scheduling logic by writing your own trigger class.
Custom triggers are treated no differently from the built-in ones.
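For example, a custom trigger only needs to implement the ``next()``, ``__getstate__()``
and ``__setstate__()`` methods of the ``Trigger`` interface. The following sketch
(illustrative only; this class is not part of APScheduler) fires at the start of every
minute:

.. code-block:: python

    from __future__ import annotations

    from datetime import datetime, timedelta, timezone

    from apscheduler.abc import Trigger


    class EveryFullMinuteTrigger(Trigger):
        """Illustrative trigger that fires at the start of every minute, forever."""

        def next(self) -> datetime | None:
            # round the current time up to the next full minute
            now = datetime.now(timezone.utc)
            return (now + timedelta(minutes=1)).replace(second=0, microsecond=0)

        def __getstate__(self):
            return {"version": 1}

        def __setstate__(self, state):
            pass
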
Other notable features include:
* You can limit the maximum number of simultaneous jobs for a given task (function)
* You can limit the amount of time a job is allowed to start late
* Jitter (adjustable, random delays added to the run time of each scheduled job)
.. _Trio: https://pypi.org/project/trio/
.. _WSGI: https://wsgi.readthedocs.io/en/latest/what.html
.. _ASGI: https://asgi.readthedocs.io/en/latest/index.html
.. _documentation: https://apscheduler.readthedocs.io/en/master/
Documentation
=============
Documentation can be found
`here <https://apscheduler.readthedocs.io/en/master/?badge=latest>`_.
Source
======
The source can be browsed at `Github <https://github.com/agronholm/apscheduler>`_.
Reporting bugs
==============
A `bug tracker <https://github.com/agronholm/apscheduler/issues>`_ is provided by
GitHub.
Getting help
============
If you have problems or other questions, you can either:
* Ask in the `apscheduler <https://gitter.im/apscheduler/Lobby>`_ room on Gitter
* Post a question on `GitHub discussions`_, or
* Post a question on StackOverflow_ and add the ``apscheduler`` tag
.. _GitHub discussions: https://github.com/agronholm/apscheduler/discussions/categories/q-a
.. _StackOverflow: http://stackoverflow.com/questions/tagged/apscheduler
| APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/README.rst | README.rst |
from __future__ import annotations
from datetime import datetime, timezone
from functools import partial
from traceback import format_tb
from typing import Any
from uuid import UUID
import attrs
from attrs.converters import optional
from . import abc
from ._converters import as_aware_datetime, as_uuid
from ._enums import JobOutcome
from ._structures import JobResult
from ._utils import qualified_name
def serialize(inst, field, value):
    """Value serializer for attrs.asdict() that converts frozensets to lists."""
if isinstance(value, frozenset):
return list(value)
return value
@attrs.define(kw_only=True, frozen=True)
class Event:
"""
Base class for all events.
    :ivar timestamp: the time when the event occurred
"""
timestamp: datetime = attrs.field(
factory=partial(datetime.now, timezone.utc), converter=as_aware_datetime
)
def marshal(self, serializer: abc.Serializer) -> dict[str, Any]:
return attrs.asdict(self, value_serializer=serialize)
@classmethod
def unmarshal(cls, serializer: abc.Serializer, marshalled: dict[str, Any]) -> Event:
return cls(**marshalled)
#
# Data store events
#
@attrs.define(kw_only=True, frozen=True)
class DataStoreEvent(Event):
"""Base class for events originating from a data store."""
@attrs.define(kw_only=True, frozen=True)
class TaskAdded(DataStoreEvent):
"""
Signals that a new task was added to the store.
:ivar task_id: ID of the task that was added
"""
task_id: str
@attrs.define(kw_only=True, frozen=True)
class TaskUpdated(DataStoreEvent):
"""
Signals that a task was updated in a data store.
:ivar task_id: ID of the task that was updated
"""
task_id: str
@attrs.define(kw_only=True, frozen=True)
class TaskRemoved(DataStoreEvent):
"""
Signals that a task was removed from the store.
:ivar task_id: ID of the task that was removed
"""
task_id: str
@attrs.define(kw_only=True, frozen=True)
class ScheduleAdded(DataStoreEvent):
"""
Signals that a new schedule was added to the store.
:ivar schedule_id: ID of the schedule that was added
:ivar next_fire_time: the first run time calculated for the schedule
"""
schedule_id: str
next_fire_time: datetime | None = attrs.field(converter=optional(as_aware_datetime))
@attrs.define(kw_only=True, frozen=True)
class ScheduleUpdated(DataStoreEvent):
"""
Signals that a schedule has been updated in the store.
:ivar schedule_id: ID of the schedule that was updated
:ivar next_fire_time: the next time the schedule will run
"""
schedule_id: str
next_fire_time: datetime | None = attrs.field(converter=optional(as_aware_datetime))
@attrs.define(kw_only=True, frozen=True)
class ScheduleRemoved(DataStoreEvent):
"""
Signals that a schedule was removed from the store.
:ivar schedule_id: ID of the schedule that was removed
"""
schedule_id: str
@attrs.define(kw_only=True, frozen=True)
class JobAdded(DataStoreEvent):
"""
Signals that a new job was added to the store.
:ivar job_id: ID of the job that was added
:ivar task_id: ID of the task the job would run
:ivar schedule_id: ID of the schedule the job was created from
:ivar tags: the set of tags collected from the associated task and schedule
"""
job_id: UUID = attrs.field(converter=as_uuid)
task_id: str
schedule_id: str | None
tags: frozenset[str] = attrs.field(converter=frozenset)
@attrs.define(kw_only=True, frozen=True)
class JobRemoved(DataStoreEvent):
"""
Signals that a job was removed from the store.
:ivar job_id: ID of the job that was removed
"""
job_id: UUID = attrs.field(converter=as_uuid)
@attrs.define(kw_only=True, frozen=True)
class ScheduleDeserializationFailed(DataStoreEvent):
"""
Signals that the deserialization of a schedule has failed.
:ivar schedule_id: ID of the schedule that failed to deserialize
:ivar exception: the exception that was raised during deserialization
"""
schedule_id: str
exception: BaseException
@attrs.define(kw_only=True, frozen=True)
class JobDeserializationFailed(DataStoreEvent):
"""
Signals that the deserialization of a job has failed.
:ivar job_id: ID of the job that failed to deserialize
:ivar exception: the exception that was raised during deserialization
"""
job_id: UUID = attrs.field(converter=as_uuid)
exception: BaseException
#
# Scheduler events
#
@attrs.define(kw_only=True, frozen=True)
class SchedulerEvent(Event):
"""Base class for events originating from a scheduler."""
@attrs.define(kw_only=True, frozen=True)
class SchedulerStarted(SchedulerEvent):
pass
@attrs.define(kw_only=True, frozen=True)
class SchedulerStopped(SchedulerEvent):
"""
Signals that a scheduler has stopped.
:ivar exception: the exception that caused the scheduler to stop, if any
"""
exception: BaseException | None = None
#
# Worker events
#
@attrs.define(kw_only=True, frozen=True)
class WorkerEvent(Event):
"""Base class for events originating from a worker."""
@attrs.define(kw_only=True, frozen=True)
class WorkerStarted(WorkerEvent):
"""Signals that a worker has started."""
@attrs.define(kw_only=True, frozen=True)
class WorkerStopped(WorkerEvent):
"""
Signals that a worker has stopped.
:ivar exception: the exception that caused the worker to stop, if any
"""
exception: BaseException | None = None
@attrs.define(kw_only=True, frozen=True)
class JobAcquired(WorkerEvent):
"""
Signals that a worker has acquired a job for processing.
    :ivar job_id: the ID of the job that was acquired
    :ivar worker_id: the ID of the worker that acquired the job
"""
job_id: UUID = attrs.field(converter=as_uuid)
worker_id: str
@attrs.define(kw_only=True, frozen=True)
class JobReleased(WorkerEvent):
"""
Signals that a worker has finished processing of a job.
    :ivar job_id: the ID of the job that was released
    :ivar worker_id: the ID of the worker that released the job
    :ivar outcome: the outcome of the job
    :ivar exception_type: the fully qualified name of the exception if ``outcome`` is
        :data:`JobOutcome.error`
    :ivar exception_message: the result of ``str(exception)`` if ``outcome`` is
        :data:`JobOutcome.error`
    :ivar exception_traceback: the traceback lines from the exception if ``outcome`` is
        :data:`JobOutcome.error`
"""
job_id: UUID = attrs.field(converter=as_uuid)
worker_id: str
outcome: JobOutcome
exception_type: str | None = None
exception_message: str | None = None
exception_traceback: list[str] | None = None
@classmethod
def from_result(cls, result: JobResult, worker_id: str) -> JobReleased:
if result.exception is not None:
exception_type: str | None = qualified_name(result.exception.__class__)
exception_message: str | None = str(result.exception)
exception_traceback: list[str] | None = format_tb(
result.exception.__traceback__
)
else:
exception_type = exception_message = exception_traceback = None
return cls(
job_id=result.job_id,
worker_id=worker_id,
outcome=result.outcome,
exception_type=exception_type,
exception_message=exception_message,
exception_traceback=exception_traceback,
) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/_events.py | _events.py |
from __future__ import annotations
import sys
from datetime import date, datetime, tzinfo
from functools import partial
from typing import Any, Callable, overload
from ._exceptions import DeserializationError, SerializationError
if sys.version_info >= (3, 9):
from zoneinfo import ZoneInfo
else:
from backports.zoneinfo import ZoneInfo
def marshal_object(obj) -> tuple[str, Any]:
    """Return a ``(class reference, state)`` tuple for the given object."""
return (
f"{obj.__class__.__module__}:{obj.__class__.__qualname__}",
obj.__getstate__(),
)
def unmarshal_object(ref: str, state):
    """Recreate an object from a class reference and a previously saved state."""
cls = callable_from_ref(ref)
instance = cls.__new__(cls)
instance.__setstate__(state)
return instance
@overload
def marshal_date(value: None) -> None:
...
@overload
def marshal_date(value: date) -> str:
...
def marshal_date(value):
return value.isoformat() if value is not None else None
@overload
def unmarshal_date(value: None) -> None:
...
@overload
def unmarshal_date(value: str) -> date:
...
def unmarshal_date(value):
if value is None:
return None
elif len(value) == 10:
return date.fromisoformat(value)
else:
return datetime.fromisoformat(value)
def marshal_timezone(value: tzinfo) -> str:
if isinstance(value, ZoneInfo):
return value.key
elif hasattr(value, "zone"): # pytz timezones
return value.zone
raise SerializationError(
f"Unserializable time zone: {value!r}\n"
f"Only time zones from the zoneinfo or pytz modules can be serialized."
)
def unmarshal_timezone(value: str) -> ZoneInfo:
return ZoneInfo(value)
def callable_to_ref(func: Callable) -> str:
"""
Return a reference to the given callable.
:raises SerializationError: if the given object is not callable, is a partial(),
lambda or local function or does not have the ``__module__`` and
``__qualname__`` attributes
"""
if isinstance(func, partial):
raise SerializationError("Cannot create a reference to a partial()")
if not hasattr(func, "__module__"):
raise SerializationError("Callable has no __module__ attribute")
if not hasattr(func, "__qualname__"):
raise SerializationError("Callable has no __qualname__ attribute")
if "<lambda>" in func.__qualname__:
raise SerializationError("Cannot create a reference to a lambda")
if "<locals>" in func.__qualname__:
raise SerializationError("Cannot create a reference to a nested function")
return f"{func.__module__}:{func.__qualname__}"
def callable_from_ref(ref: str) -> Callable:
"""
Return the callable pointed to by ``ref``.
    :raises LookupError: if the module named in the reference could not be imported
    :raises DeserializationError: if the reference could not be resolved or the looked
        up object is not callable
"""
if ":" not in ref:
raise ValueError(f"Invalid reference: {ref}")
modulename, rest = ref.split(":", 1)
try:
obj = __import__(modulename, fromlist=[rest])
except ImportError:
raise LookupError(f"Error resolving reference {ref!r}: could not import module")
try:
for name in rest.split("."):
obj = getattr(obj, name)
except Exception:
raise DeserializationError(
f"Error resolving reference {ref!r}: error looking up object"
)
if not callable(obj):
raise DeserializationError(
f"{ref!r} points to an object of type "
f"{obj.__class__.__qualname__} which is not callable"
)
return obj | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/marshalling.py | marshalling.py |
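if __name__ == "__main__":
    # Illustrative demo (not part of the library); run with
    # ``python -m apscheduler.marshalling``.
    import json

    # a callable becomes a "module:qualname" reference and can be resolved back
    ref = callable_to_ref(json.dumps)
    assert ref == "json:dumps"
    assert callable_from_ref(ref) is json.dumps
    # dates survive a marshal/unmarshal cycle as ISO 8601 strings
    assert unmarshal_date(marshal_date(date(2022, 1, 31))) == date(2022, 1, 31)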
from __future__ import annotations
from uuid import UUID
class TaskLookupError(LookupError):
"""Raised by a data store when it cannot find the requested task."""
def __init__(self, task_id: str):
super().__init__(f"No task by the id of {task_id!r} was found")
class ScheduleLookupError(LookupError):
"""Raised by a scheduler when it cannot find the requested schedule."""
def __init__(self, schedule_id: str):
super().__init__(f"No schedule by the id of {schedule_id!r} was found")
class JobLookupError(LookupError):
"""Raised when the job store cannot find a job for update or removal."""
def __init__(self, job_id: UUID):
super().__init__(f"No job by the id of {job_id} was found")
class JobResultNotReady(Exception):
"""
Raised by :meth:`~.schedulers.sync.Scheduler.get_job_result` if the job result is
not ready.
"""
    def __init__(self, job_id: UUID):
        super().__init__(f"The result of job {job_id} is not ready")
class JobCancelled(Exception):
"""
Raised by :meth:`~.schedulers.sync.Scheduler.get_job_result` if the job was
cancelled.
"""
class JobDeadlineMissed(Exception):
"""
Raised by :meth:`~.schedulers.sync.Scheduler.get_job_result` if the job failed to
start within the allotted time.
"""
class ConflictingIdError(KeyError):
"""
Raised when trying to add a schedule to a store that already contains a schedule by
that ID, and the conflict policy of ``exception`` is used.
"""
def __init__(self, schedule_id):
super().__init__(
f"This data store already contains a schedule with the identifier "
f"{schedule_id!r}"
)
class SerializationError(Exception):
"""Raised when a serializer fails to serialize the given object."""
class DeserializationError(Exception):
"""Raised when a serializer fails to deserialize the given object."""
class MaxIterationsReached(Exception):
"""
Raised when a trigger has reached its maximum number of allowed computation
iterations when trying to calculate the next fire time.
""" | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/_exceptions.py | _exceptions.py |
from __future__ import annotations
from collections.abc import Callable
from datetime import datetime, timedelta, timezone
from functools import partial
from typing import TYPE_CHECKING, Any
from uuid import UUID, uuid4
import attrs
import tenacity.stop
import tenacity.wait
from attrs.validators import instance_of
from ._converters import as_enum, as_timedelta
from ._enums import CoalescePolicy, JobOutcome
from .marshalling import callable_from_ref, callable_to_ref
if TYPE_CHECKING:
from .abc import Serializer, Trigger
def serialize(inst, field, value):
    """Value serializer for attrs.asdict() that converts frozensets to lists."""
if isinstance(value, frozenset):
return list(value)
return value
@attrs.define(kw_only=True)
class Task:
"""
Represents a callable and its surrounding configuration parameters.
:var str id: the unique identifier of this task
:var ~collections.abc.Callable func: the callable that is called when this task is
run
:var int | None max_running_jobs: maximum number of instances of this task that are
allowed to run concurrently
:var ~datetime.timedelta | None misfire_grace_time: maximum number of seconds the
run time of jobs created for this task are allowed to be late, compared to the
scheduled run time
"""
id: str
func: Callable = attrs.field(eq=False, order=False)
max_running_jobs: int | None = attrs.field(eq=False, order=False, default=None)
misfire_grace_time: timedelta | None = attrs.field(
eq=False, order=False, default=None
)
state: Any = None
def marshal(self, serializer: Serializer) -> dict[str, Any]:
marshalled = attrs.asdict(self, value_serializer=serialize)
marshalled["func"] = callable_to_ref(self.func)
marshalled["state"] = serializer.serialize(self.state) if self.state else None
return marshalled
@classmethod
def unmarshal(cls, serializer: Serializer, marshalled: dict[str, Any]) -> Task:
marshalled["func"] = callable_from_ref(marshalled["func"])
if marshalled["state"] is not None:
marshalled["state"] = serializer.deserialize(marshalled["state"])
return cls(**marshalled)
@attrs.define(kw_only=True)
class Schedule:
"""
Represents a schedule on which a task will be run.
:var str id: the unique identifier of this schedule
:var str task_id: unique identifier of the task to be run on this schedule
:var tuple args: positional arguments to pass to the task callable
:var dict[str, Any] kwargs: keyword arguments to pass to the task callable
:var CoalescePolicy coalesce: determines what to do when processing the schedule if
multiple fire times have become due for this schedule since the last processing
:var ~datetime.timedelta | None misfire_grace_time: maximum number of seconds the
scheduled job's actual run time is allowed to be late, compared to the scheduled
run time
:var ~datetime.timedelta | None max_jitter: maximum number of seconds to randomly
add to the scheduled time for each job created from this schedule
:var frozenset[str] tags: strings that can be used to categorize and filter the
schedule and its derivative jobs
    :var Trigger trigger: the trigger that determines when the task will be run
    :var ~datetime.datetime | None next_fire_time: the next time the task will be run
    :var ~datetime.datetime | None last_fire_time: the last time the task was scheduled
        to run
    :var str | None acquired_by: ID of the scheduler that has acquired this schedule for
        processing
    :var ~datetime.datetime | None acquired_until: the time after which other schedulers
        are free to acquire the schedule for processing even if it is still marked as
        acquired
"""
id: str
task_id: str = attrs.field(eq=False, order=False)
trigger: Trigger = attrs.field(eq=False, order=False)
args: tuple = attrs.field(eq=False, order=False, converter=tuple, default=())
kwargs: dict[str, Any] = attrs.field(
eq=False, order=False, converter=dict, default=()
)
coalesce: CoalescePolicy = attrs.field(
eq=False,
order=False,
default=CoalescePolicy.latest,
converter=as_enum(CoalescePolicy),
)
misfire_grace_time: timedelta | None = attrs.field(
eq=False, order=False, default=None, converter=as_timedelta
)
max_jitter: timedelta | None = attrs.field(
eq=False, order=False, converter=as_timedelta, default=None
)
tags: frozenset[str] = attrs.field(
eq=False, order=False, converter=frozenset, default=()
)
next_fire_time: datetime | None = attrs.field(eq=False, order=False, default=None)
last_fire_time: datetime | None = attrs.field(eq=False, order=False, default=None)
acquired_by: str | None = attrs.field(eq=False, order=False, default=None)
acquired_until: datetime | None = attrs.field(eq=False, order=False, default=None)
def marshal(self, serializer: Serializer) -> dict[str, Any]:
marshalled = attrs.asdict(self, value_serializer=serialize)
marshalled["trigger"] = serializer.serialize(self.trigger)
marshalled["args"] = serializer.serialize(self.args)
marshalled["kwargs"] = serializer.serialize(self.kwargs)
if not self.acquired_by:
del marshalled["acquired_by"]
del marshalled["acquired_until"]
return marshalled
@classmethod
def unmarshal(cls, serializer: Serializer, marshalled: dict[str, Any]) -> Schedule:
marshalled["trigger"] = serializer.deserialize(marshalled["trigger"])
marshalled["args"] = serializer.deserialize(marshalled["args"])
marshalled["kwargs"] = serializer.deserialize(marshalled["kwargs"])
return cls(**marshalled)
@property
def next_deadline(self) -> datetime | None:
if self.next_fire_time and self.misfire_grace_time:
return self.next_fire_time + self.misfire_grace_time
return None
@attrs.define(kw_only=True)
class Job:
"""
Represents a queued request to run a task.
:var ~uuid.UUID id: autogenerated unique identifier of the job
:var str task_id: unique identifier of the task to be run
:var tuple args: positional arguments to pass to the task callable
:var dict[str, Any] kwargs: keyword arguments to pass to the task callable
:var str schedule_id: unique identifier of the associated schedule
(if the job was derived from a schedule)
:var ~datetime.datetime | None scheduled_fire_time: the time the job was scheduled
to run at (if the job was derived from a schedule; includes jitter)
:var ~datetime.timedelta | None jitter: the time that was randomly added to the
calculated scheduled run time (if the job was derived from a schedule)
:var ~datetime.datetime | None start_deadline: if the job is started in the worker
after this time, it is considered to be misfired and will be aborted
:var ~datetime.timedelta result_expiration_time: minimum amount of time to keep the
result available for fetching in the data store
:var frozenset[str] tags: strings that can be used to categorize and filter the job
:var ~datetime.datetime created_at: the time at which the job was created
:var ~datetime.datetime | None started_at: the time at which the execution of the
job was started
:var str | None acquired_by: the unique identifier of the worker that has acquired
the job for execution
    :var ~datetime.datetime | None acquired_until: the time after which other workers
        are free to acquire the job for processing even if it is still marked as
        acquired
"""
id: UUID = attrs.field(factory=uuid4)
task_id: str = attrs.field(eq=False, order=False)
args: tuple = attrs.field(eq=False, order=False, converter=tuple, default=())
kwargs: dict[str, Any] = attrs.field(
eq=False, order=False, converter=dict, default=()
)
schedule_id: str | None = attrs.field(eq=False, order=False, default=None)
scheduled_fire_time: datetime | None = attrs.field(
eq=False, order=False, default=None
)
jitter: timedelta = attrs.field(
eq=False, order=False, converter=as_timedelta, factory=timedelta
)
start_deadline: datetime | None = attrs.field(eq=False, order=False, default=None)
result_expiration_time: timedelta = attrs.field(
eq=False, order=False, converter=as_timedelta, default=timedelta()
)
tags: frozenset[str] = attrs.field(
eq=False, order=False, converter=frozenset, default=()
)
created_at: datetime = attrs.field(
eq=False, order=False, factory=partial(datetime.now, timezone.utc)
)
started_at: datetime | None = attrs.field(eq=False, order=False, default=None)
acquired_by: str | None = attrs.field(eq=False, order=False, default=None)
acquired_until: datetime | None = attrs.field(eq=False, order=False, default=None)
@property
def original_scheduled_time(self) -> datetime | None:
"""The scheduled time without any jitter included."""
if self.scheduled_fire_time is None:
return None
return self.scheduled_fire_time - self.jitter
def marshal(self, serializer: Serializer) -> dict[str, Any]:
marshalled = attrs.asdict(self, value_serializer=serialize)
marshalled["args"] = serializer.serialize(self.args)
marshalled["kwargs"] = serializer.serialize(self.kwargs)
if not self.acquired_by:
del marshalled["acquired_by"]
del marshalled["acquired_until"]
return marshalled
@classmethod
def unmarshal(cls, serializer: Serializer, marshalled: dict[str, Any]) -> Job:
marshalled["args"] = serializer.deserialize(marshalled["args"])
marshalled["kwargs"] = serializer.deserialize(marshalled["kwargs"])
return cls(**marshalled)
@attrs.define(kw_only=True)
class JobInfo:
"""
Contains information about the currently running job.
    This information is available in the thread or task where a job is currently being
    run, via :data:`~apscheduler.current_job`.
:var ~uuid.UUID job_id: the unique identifier of the job
:var str task_id: the unique identifier of the task that is being run
:var str | None schedule_id: the unique identifier of the schedule that the job was
derived from (if any)
:var ~datetime.datetime | None scheduled_fire_time: the time the job was scheduled
to run at (if the job was derived from a schedule; includes jitter)
:var ~datetime.timedelta jitter: the time that was randomly added to the calculated
scheduled run time (if the job was derived from a schedule)
:var ~datetime.datetime | None start_deadline: if the job is started in the worker
after this time, it is considered to be misfired and will be aborted
:var frozenset[str] tags: strings that can be used to categorize and filter the job
"""
job_id: UUID
task_id: str
schedule_id: str | None
scheduled_fire_time: datetime | None
jitter: timedelta
start_deadline: datetime | None
tags: frozenset[str]
@classmethod
def from_job(cls, job: Job) -> JobInfo:
return cls(
job_id=job.id,
task_id=job.task_id,
schedule_id=job.schedule_id,
scheduled_fire_time=job.scheduled_fire_time,
jitter=job.jitter,
start_deadline=job.start_deadline,
tags=job.tags,
)
@attrs.define(kw_only=True, frozen=True)
class JobResult:
"""
Represents the result of running a job.
:var ~uuid.UUID job_id: the unique identifier of the job
:var JobOutcome outcome: indicates how the job ended
:var ~datetime.datetime finished_at: the time when the job ended
:var BaseException | None exception: the exception object if the job ended due to an
exception being raised
:var return_value: the return value from the task function (if the job ran to
completion successfully)
"""
job_id: UUID
outcome: JobOutcome = attrs.field(
eq=False, order=False, converter=as_enum(JobOutcome)
)
finished_at: datetime = attrs.field(
eq=False, order=False, factory=partial(datetime.now, timezone.utc)
)
expires_at: datetime = attrs.field(eq=False, order=False)
exception: BaseException | None = attrs.field(eq=False, order=False, default=None)
return_value: Any = attrs.field(eq=False, order=False, default=None)
@classmethod
def from_job(
cls,
job: Job,
outcome: JobOutcome,
*,
finished_at: datetime | None = None,
exception: BaseException | None = None,
return_value: Any = None,
) -> JobResult:
real_finished_at = finished_at or datetime.now(timezone.utc)
expires_at = real_finished_at + job.result_expiration_time
return cls(
job_id=job.id,
outcome=outcome,
finished_at=real_finished_at,
expires_at=expires_at,
exception=exception,
return_value=return_value,
)
def marshal(self, serializer: Serializer) -> dict[str, Any]:
marshalled = attrs.asdict(self, value_serializer=serialize)
if self.outcome is JobOutcome.error:
marshalled["exception"] = serializer.serialize(self.exception)
else:
del marshalled["exception"]
if self.outcome is JobOutcome.success:
marshalled["return_value"] = serializer.serialize(self.return_value)
else:
del marshalled["return_value"]
return marshalled
@classmethod
def unmarshal(cls, serializer: Serializer, marshalled: dict[str, Any]) -> JobResult:
if marshalled.get("exception"):
marshalled["exception"] = serializer.deserialize(marshalled["exception"])
elif marshalled.get("return_value"):
marshalled["return_value"] = serializer.deserialize(
marshalled["return_value"]
)
return cls(**marshalled)
@attrs.define(kw_only=True, frozen=True)
class RetrySettings:
"""
Settings for retrying an operation with Tenacity.
:param stop: defines when to stop trying
:param wait: defines how long to wait between attempts
"""
stop: tenacity.stop.stop_base = attrs.field(
validator=instance_of(tenacity.stop.stop_base),
default=tenacity.stop_after_delay(60),
)
wait: tenacity.wait.wait_base = attrs.field(
validator=instance_of(tenacity.wait.wait_base),
default=tenacity.wait_exponential(min=0.5, max=20),
) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/_structures.py | _structures.py |
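if __name__ == "__main__":
    # Illustrative round-trip demo (not part of the library); run with
    # ``python -m apscheduler._structures``. Assumes the bundled JSONSerializer can be
    # constructed without arguments.
    from .serializers.json import JSONSerializer

    serializer = JSONSerializer()
    task = Task(id="demo", func=print)
    marshalled = task.marshal(serializer)
    # the callable is stored as a "module:qualname" reference
    assert marshalled["func"] == "builtins:print"
    assert Task.unmarshal(serializer, marshalled) == task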
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from datetime import datetime
from typing import TYPE_CHECKING, Any, Callable, Iterable, Iterator
from uuid import UUID
if TYPE_CHECKING:
from ._enums import ConflictPolicy
from ._events import Event
from ._structures import Job, JobResult, Schedule, Task
class Trigger(Iterator[datetime], metaclass=ABCMeta):
"""
Abstract base class that defines the interface that every trigger must implement.
"""
__slots__ = ()
@abstractmethod
def next(self) -> datetime | None:
"""
Return the next datetime to fire on.
If no such datetime can be calculated, ``None`` is returned.
        :raises apscheduler.MaxIterationsReached: if the maximum number of allowed
            computation iterations is reached while calculating the next fire time
"""
@abstractmethod
def __getstate__(self):
"""Return the (JSON compatible) serializable state of the trigger."""
@abstractmethod
def __setstate__(self, state):
"""Initialize an empty instance from an existing state."""
def __iter__(self):
return self
def __next__(self) -> datetime:
dateval = self.next()
if dateval is None:
raise StopIteration
else:
return dateval
class Serializer(metaclass=ABCMeta):
"""Interface for classes that implement (de)serialization."""
__slots__ = ()
@abstractmethod
def serialize(self, obj: Any) -> bytes:
"""
Turn the given object into a bytestring.
:return: a bytestring that can be later restored using :meth:`deserialize`
"""
@abstractmethod
def deserialize(self, serialized: bytes) -> Any:
"""
Restore a previously serialized object from bytestring
:param serialized: a bytestring previously received from :meth:`serialize`
:return: a copy of the original object
"""
class Subscription(metaclass=ABCMeta):
"""
Represents a subscription with an event source.
If used as a context manager, unsubscribes on exit.
"""
def __enter__(self) -> Subscription:
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.unsubscribe()
@abstractmethod
def unsubscribe(self) -> None:
"""
Cancel this subscription.
Does nothing if the subscription has already been cancelled.
"""
class EventSource(metaclass=ABCMeta):
"""
Interface for objects that can deliver notifications to interested subscribers.
"""
@abstractmethod
def subscribe(
self,
callback: Callable[[Event], Any],
event_types: Iterable[type[Event]] | None = None,
*,
one_shot: bool = False,
) -> Subscription:
"""
Subscribe to events from this event source.
:param callback: callable to be called with the event object when an event is
published
:param event_types: an iterable of concrete Event classes to subscribe to
:param one_shot: if ``True``, automatically unsubscribe after the first matching
event
"""
class EventBroker(EventSource):
"""
Interface for objects that can be used to publish notifications to interested
subscribers.
"""
@abstractmethod
def start(self) -> None:
pass
@abstractmethod
def stop(self, *, force: bool = False) -> None:
pass
@abstractmethod
def publish(self, event: Event) -> None:
"""Publish an event."""
@abstractmethod
def publish_local(self, event: Event) -> None:
"""Publish an event, but only to local subscribers."""
class AsyncEventBroker(EventSource):
"""Asynchronous version of :class:`EventBroker`. Expected to work on asyncio."""
@abstractmethod
async def start(self) -> None:
pass
@abstractmethod
async def stop(self, *, force: bool = False) -> None:
pass
@abstractmethod
async def publish(self, event: Event) -> None:
"""Publish an event."""
@abstractmethod
async def publish_local(self, event: Event) -> None:
"""Publish an event, but only to local subscribers."""
class DataStore:
    """Interface for data stores, which persist tasks, schedules, jobs and job results."""
@abstractmethod
def start(self, event_broker: EventBroker) -> None:
pass
@abstractmethod
def stop(self, *, force: bool = False) -> None:
pass
@property
@abstractmethod
def events(self) -> EventSource:
pass
@abstractmethod
def add_task(self, task: Task) -> None:
"""
Add the given task to the store.
If a task with the same ID already exists, it replaces the old one but does NOT
affect task accounting (# of running jobs).
:param task: the task to be added
"""
@abstractmethod
def remove_task(self, task_id: str) -> None:
"""
Remove the task with the given ID.
:param task_id: ID of the task to be removed
:raises TaskLookupError: if no matching task was found
"""
@abstractmethod
def get_task(self, task_id: str) -> Task:
"""
Get an existing task definition.
:param task_id: ID of the task to be returned
:return: the matching task
:raises TaskLookupError: if no matching task was found
"""
@abstractmethod
def get_tasks(self) -> list[Task]:
"""
Get all the tasks in this store.
:return: a list of tasks, sorted by ID
"""
@abstractmethod
def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
"""
Get schedules from the data store.
:param ids: a specific set of schedule IDs to return, or ``None`` to return all
schedules
:return: the list of matching schedules, in unspecified order
"""
@abstractmethod
def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None:
"""
Add or update the given schedule in the data store.
:param schedule: schedule to be added
:param conflict_policy: policy that determines what to do if there is an
existing schedule with the same ID
"""
@abstractmethod
def remove_schedules(self, ids: Iterable[str]) -> None:
"""
Remove schedules from the data store.
:param ids: a specific set of schedule IDs to remove
"""
@abstractmethod
def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]:
"""
Acquire unclaimed due schedules for processing.
This method claims up to the requested number of schedules for the given
scheduler and returns them.
:param scheduler_id: unique identifier of the scheduler
:param limit: maximum number of schedules to claim
:return: the list of claimed schedules
"""
@abstractmethod
def release_schedules(self, scheduler_id: str, schedules: list[Schedule]) -> None:
"""
Release the claims on the given schedules and update them on the store.
:param scheduler_id: unique identifier of the scheduler
:param schedules: the previously claimed schedules
"""
@abstractmethod
def get_next_schedule_run_time(self) -> datetime | None:
"""
Return the earliest upcoming run time of all the schedules in the store, or
``None`` if there are no active schedules.
"""
@abstractmethod
def add_job(self, job: Job) -> None:
"""
Add a job to be executed by an eligible worker.
:param job: the job object
"""
@abstractmethod
def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
"""
Get the list of pending jobs.
:param ids: a specific set of job IDs to return, or ``None`` to return all jobs
:return: the list of matching pending jobs, in the order they will be given to
workers
"""
@abstractmethod
def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
"""
Acquire unclaimed jobs for execution.
This method claims up to the requested number of jobs for the given worker and
returns them.
:param worker_id: unique identifier of the worker
:param limit: maximum number of jobs to claim and return
:return: the list of claimed jobs
"""
@abstractmethod
def release_job(self, worker_id: str, task_id: str, result: JobResult) -> None:
"""
Release the claim on the given job and record the result.
:param worker_id: unique identifier of the worker
:param task_id: the job's task ID
:param result: the result of the job
"""
@abstractmethod
def get_job_result(self, job_id: UUID) -> JobResult | None:
"""
Retrieve the result of a job.
The result is removed from the store after retrieval.
:param job_id: the identifier of the job
:return: the result, or ``None`` if the result was not found
"""
class AsyncDataStore:
"""Asynchronous version of :class:`DataStore`. Expected to work on asyncio."""
@abstractmethod
async def start(self, event_broker: AsyncEventBroker) -> None:
pass
@abstractmethod
async def stop(self, *, force: bool = False) -> None:
pass
@property
@abstractmethod
def events(self) -> EventSource:
pass
@abstractmethod
async def add_task(self, task: Task) -> None:
"""
Add the given task to the store.
If a task with the same ID already exists, it replaces the old one but does NOT
affect task accounting (# of running jobs).
:param task: the task to be added
"""
@abstractmethod
async def remove_task(self, task_id: str) -> None:
"""
Remove the task with the given ID.
:param task_id: ID of the task to be removed
:raises TaskLookupError: if no matching task was found
"""
@abstractmethod
async def get_task(self, task_id: str) -> Task:
"""
Get an existing task definition.
:param task_id: ID of the task to be returned
:return: the matching task
:raises TaskLookupError: if no matching task was found
"""
@abstractmethod
async def get_tasks(self) -> list[Task]:
"""
Get all the tasks in this store.
:return: a list of tasks, sorted by ID
"""
@abstractmethod
async def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
"""
Get schedules from the data store.
:param ids: a specific set of schedule IDs to return, or ``None`` to return all
schedules
:return: the list of matching schedules, in unspecified order
"""
@abstractmethod
async def add_schedule(
self, schedule: Schedule, conflict_policy: ConflictPolicy
) -> None:
"""
Add or update the given schedule in the data store.
:param schedule: schedule to be added
:param conflict_policy: policy that determines what to do if there is an
existing schedule with the same ID
"""
@abstractmethod
async def remove_schedules(self, ids: Iterable[str]) -> None:
"""
Remove schedules from the data store.
:param ids: a specific set of schedule IDs to remove
"""
@abstractmethod
async def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]:
"""
Acquire unclaimed due schedules for processing.
This method claims up to the requested number of schedules for the given
scheduler and returns them.
:param scheduler_id: unique identifier of the scheduler
:param limit: maximum number of schedules to claim
:return: the list of claimed schedules
"""
@abstractmethod
async def release_schedules(
self, scheduler_id: str, schedules: list[Schedule]
) -> None:
"""
Release the claims on the given schedules and update them on the store.
:param scheduler_id: unique identifier of the scheduler
:param schedules: the previously claimed schedules
"""
@abstractmethod
async def get_next_schedule_run_time(self) -> datetime | None:
"""
Return the earliest upcoming run time of all the schedules in the store, or
``None`` if there are no active schedules.
"""
@abstractmethod
async def add_job(self, job: Job) -> None:
"""
Add a job to be executed by an eligible worker.
:param job: the job object
"""
@abstractmethod
async def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
"""
Get the list of pending jobs.
:param ids: a specific set of job IDs to return, or ``None`` to return all jobs
:return: the list of matching pending jobs, in the order they will be given to
workers
"""
@abstractmethod
async def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
"""
Acquire unclaimed jobs for execution.
This method claims up to the requested number of jobs for the given worker and
returns them.
:param worker_id: unique identifier of the worker
:param limit: maximum number of jobs to claim and return
:return: the list of claimed jobs
"""
@abstractmethod
async def release_job(
self, worker_id: str, task_id: str, result: JobResult
) -> None:
"""
Release the claim on the given job and record the result.
:param worker_id: unique identifier of the worker
:param task_id: the job's task ID
:param result: the result of the job
"""
@abstractmethod
async def get_job_result(self, job_id: UUID) -> JobResult | None:
"""
Retrieve the result of a job.
The result is removed from the store after retrieval.
:param job_id: the identifier of the job
:return: the result, or ``None`` if the result was not found
""" | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/abc.py | abc.py |
from __future__ import annotations
import sys
from datetime import date, datetime, timedelta, timezone, tzinfo
from typing import Any
import attrs
from attrs import Attribute
from tzlocal import get_localzone
from ._exceptions import DeserializationError
from .abc import Trigger
if sys.version_info >= (3, 9):
from zoneinfo import ZoneInfo
else:
from backports.zoneinfo import ZoneInfo
def as_int(value) -> int | None:
"""Convert the value into an integer."""
if value is None:
return None
return int(value)
def as_timezone(value: str | tzinfo | None) -> tzinfo:
"""
Convert the value into a tzinfo object.
If ``value`` is ``None`` or ``'local'``, use the local timezone.
:param value: the value to be converted
:return: a timezone object
"""
if value is None or value == "local":
return get_localzone()
elif isinstance(value, str):
return ZoneInfo(value)
elif isinstance(value, tzinfo):
if value is timezone.utc:
return ZoneInfo("UTC")
else:
return value
raise TypeError(
f"Expected tzinfo instance or timezone name, got "
f"{value.__class__.__qualname__} instead"
)
def as_date(value: date | str | None) -> date | None:
"""
Convert the value to a date.
:param value: the value to convert to a date
:return: a date object, or ``None`` if ``None`` was given
"""
if value is None:
return None
elif isinstance(value, str):
return date.fromisoformat(value)
elif isinstance(value, date):
return value
raise TypeError(
f"Expected string or date, got {value.__class__.__qualname__} instead"
)
def as_timestamp(value: datetime | None) -> float | None:
if value is None:
return None
return value.timestamp()
def as_ordinal_date(value: date | None) -> int | None:
if value is None:
return None
return value.toordinal()
def as_aware_datetime(value: datetime | str | None) -> datetime | None:
"""
Convert the value to a timezone aware datetime.
    :param value: a datetime, an ISO 8601 representation of a datetime, or ``None``;
        naive datetimes are assumed to be in the local timezone
    :return: a timezone aware datetime, or ``None`` if ``None`` was given
"""
if value is None:
return None
if isinstance(value, str):
if value.upper().endswith("Z"):
value = value[:-1] + "+00:00"
value = datetime.fromisoformat(value)
if isinstance(value, datetime):
if not value.tzinfo:
return value.replace(tzinfo=get_localzone())
else:
return value
raise TypeError(
f"Expected string or datetime, got {value.__class__.__qualname__} instead"
)
def positive_number(instance, attribute, value) -> None:
if value <= 0:
raise ValueError(f"Expected positive number, got {value} instead")
def non_negative_number(instance, attribute, value) -> None:
if value < 0:
raise ValueError(f"Expected non-negative number, got {value} instead")
def as_positive_integer(value, name: str) -> int:
if isinstance(value, int):
if value > 0:
return value
else:
raise ValueError(f"{name} must be positive")
raise TypeError(
f"{name} must be an integer, got {value.__class__.__name__} instead"
)
def as_timedelta(value: timedelta | float) -> timedelta:
if isinstance(value, (int, float)):
return timedelta(seconds=value)
elif isinstance(value, timedelta):
return value
# raise TypeError(f'{attribute.name} must be a timedelta or number of seconds, got '
# f'{value.__class__.__name__} instead')
def as_list(value, element_type: type, name: str) -> list:
value = list(value)
for i, element in enumerate(value):
if not isinstance(element, element_type):
raise TypeError(
f"Element at index {i} of {name} is not of the expected type "
f"({element_type.__name__}"
)
return value
def aware_datetime(instance: Any, attribute: Attribute, value: datetime) -> None:
if not value.tzinfo:
raise ValueError(f"{attribute.name} must be a timezone aware datetime")
def require_state_version(
trigger: Trigger, state: dict[str, Any], max_version: int
) -> None:
try:
if state["version"] > max_version:
raise DeserializationError(
f"{trigger.__class__.__name__} received a serialized state with "
f'version {state["version"]}, but it only supports up to version '
f"{max_version}. This can happen when an older version of APScheduler "
f"is being used with a data store that was previously used with a "
f"newer APScheduler version."
)
except KeyError as exc:
raise DeserializationError(
'Missing "version" key in the serialized state'
) from exc
def positive_integer(inst, field: attrs.Attribute, value) -> None:
if value <= 0:
        raise ValueError(f"{field.name} must be a positive integer")
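if __name__ == "__main__":
    # Illustrative examples (not part of the library); run with
    # ``python -m apscheduler._validators``.
    assert as_timedelta(90) == timedelta(seconds=90)
    # ISO 8601 input is converted to a timezone aware datetime
    aware = as_aware_datetime("2022-01-31T10:00:00Z")
    assert aware == datetime(2022, 1, 31, 10, tzinfo=timezone.utc)
    assert as_timezone("Europe/Helsinki").key == "Europe/Helsinki"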
from __future__ import annotations
__all__ = [
"CoalescePolicy",
"ConflictPolicy",
"ConflictingIdError",
"DataStoreEvent",
"DeserializationError",
"Event",
"Job",
"JobAcquired",
"JobAdded",
"JobCancelled",
"JobDeadlineMissed",
"JobDeserializationFailed",
"JobInfo",
"JobLookupError",
"JobOutcome",
"JobReleased",
"JobRemoved",
"JobResult",
"JobResultNotReady",
"MaxIterationsReached",
"RetrySettings",
"RunState",
"Schedule",
"ScheduleLookupError",
"SerializationError",
"ScheduleAdded",
"ScheduleUpdated",
"ScheduleRemoved",
"ScheduleDeserializationFailed",
"SchedulerEvent",
"SchedulerStarted",
"SchedulerStopped",
"Task",
"TaskAdded",
"TaskLookupError",
"TaskUpdated",
"TaskRemoved",
"WorkerEvent",
"WorkerStarted",
"WorkerStopped",
"current_scheduler",
"current_worker",
"current_job",
]
from typing import Any
from ._context import current_job, current_scheduler, current_worker
from ._enums import CoalescePolicy, ConflictPolicy, JobOutcome, RunState
from ._events import (
DataStoreEvent,
Event,
JobAcquired,
JobAdded,
JobDeserializationFailed,
JobReleased,
JobRemoved,
ScheduleAdded,
ScheduleDeserializationFailed,
ScheduleRemoved,
SchedulerEvent,
SchedulerStarted,
SchedulerStopped,
ScheduleUpdated,
TaskAdded,
TaskRemoved,
TaskUpdated,
WorkerEvent,
WorkerStarted,
WorkerStopped,
)
from ._exceptions import (
ConflictingIdError,
DeserializationError,
JobCancelled,
JobDeadlineMissed,
JobLookupError,
JobResultNotReady,
MaxIterationsReached,
ScheduleLookupError,
SerializationError,
TaskLookupError,
)
from ._structures import Job, JobInfo, JobResult, RetrySettings, Schedule, Task
# Re-export imports, so they look like they live directly in this package
value: Any
for value in list(locals().values()):
if getattr(value, "__module__", "").startswith("apscheduler."):
value.__module__ = __name__ | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/__init__.py | __init__.py |
from __future__ import annotations
from concurrent.futures import Future
from threading import Thread
import attrs
from redis import ConnectionPool, Redis
from .._events import Event
from ..abc import Serializer
from ..serializers.json import JSONSerializer
from .base import DistributedEventBrokerMixin
from .local import LocalEventBroker
@attrs.define(eq=False)
class RedisEventBroker(LocalEventBroker, DistributedEventBrokerMixin):
"""
An event broker that uses a Redis server to broadcast events.
Requires the redis_ library to be installed.
.. _redis: https://pypi.org/project/redis/
:param client: a (synchronous) Redis client
:param serializer: the serializer used to (de)serialize events for transport
:param channel: channel on which to send the messages
:param message_poll_interval: interval on which to poll for new messages (higher
values mean slower reaction time but less CPU use)
"""
client: Redis
serializer: Serializer = attrs.field(factory=JSONSerializer)
channel: str = attrs.field(kw_only=True, default="apscheduler")
message_poll_interval: float = attrs.field(kw_only=True, default=0.05)
_stopped: bool = attrs.field(init=False, default=True)
_ready_future: Future[None] = attrs.field(init=False)
_thread: Thread = attrs.field(init=False)
@classmethod
def from_url(cls, url: str, **kwargs) -> RedisEventBroker:
"""
Create a new event broker from a URL.
        :param url: a Redis URL (``redis://...``)
:param kwargs: keyword arguments to pass to the initializer of this class
:return: the newly created event broker
"""
pool = ConnectionPool.from_url(url, **kwargs)
client = Redis(connection_pool=pool)
return cls(client)
def start(self) -> None:
self._stopped = False
self._ready_future = Future()
self._thread = Thread(
target=self._listen_messages, daemon=True, name="Redis subscriber"
)
self._thread.start()
self._ready_future.result(10)
super().start()
def stop(self, *, force: bool = False) -> None:
self._stopped = True
if not force:
self._thread.join(5)
super().stop(force=force)
def _listen_messages(self) -> None:
while not self._stopped:
try:
pubsub = self.client.pubsub()
pubsub.subscribe(self.channel)
except BaseException as exc:
if not self._ready_future.done():
self._ready_future.set_exception(exc)
raise
else:
if not self._ready_future.done():
self._ready_future.set_result(None)
try:
while not self._stopped:
msg = pubsub.get_message(timeout=self.message_poll_interval)
if msg and isinstance(msg["data"], bytes):
event = self.reconstitute_event(msg["data"])
if event is not None:
self.publish_local(event)
except BaseException:
self._logger.exception("Subscriber crashed")
raise
finally:
pubsub.close()
def publish(self, event: Event) -> None:
notification = self.generate_notification(event)
self.client.publish(self.channel, notification) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/eventbrokers/redis.py | redis.py |
from __future__ import annotations
from base64 import b64decode, b64encode
from logging import Logger, getLogger
from typing import Any, Callable, Iterable
import attrs
from .. import _events
from .._events import Event
from .._exceptions import DeserializationError
from ..abc import EventSource, Serializer, Subscription
@attrs.define(eq=False, frozen=True)
class LocalSubscription(Subscription):
callback: Callable[[Event], Any]
event_types: set[type[Event]] | None
one_shot: bool
token: object
_source: BaseEventBroker
def unsubscribe(self) -> None:
self._source.unsubscribe(self.token)
@attrs.define(eq=False)
class BaseEventBroker(EventSource):
_logger: Logger = attrs.field(init=False)
_subscriptions: dict[object, LocalSubscription] = attrs.field(
init=False, factory=dict
)
def __attrs_post_init__(self) -> None:
self._logger = getLogger(self.__class__.__module__)
def subscribe(
self,
callback: Callable[[Event], Any],
event_types: Iterable[type[Event]] | None = None,
*,
one_shot: bool = False,
) -> Subscription:
types = set(event_types) if event_types else None
token = object()
subscription = LocalSubscription(callback, types, one_shot, token, self)
self._subscriptions[token] = subscription
return subscription
def unsubscribe(self, token: object) -> None:
self._subscriptions.pop(token, None)
class DistributedEventBrokerMixin:
serializer: Serializer
_logger: Logger
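    # Notifications use a simple wire format: the event class name, a single space,
    # and the serialized payload (raw bytes in generate_notification(), base64 text
    # in generate_notification_str()), e.g. b"JobAdded <serialized data>".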
def generate_notification(self, event: Event) -> bytes:
serialized = self.serializer.serialize(event.marshal(self.serializer))
return event.__class__.__name__.encode("ascii") + b" " + serialized
def generate_notification_str(self, event: Event) -> str:
serialized = self.serializer.serialize(event.marshal(self.serializer))
return event.__class__.__name__ + " " + b64encode(serialized).decode("ascii")
def _reconstitute_event(self, event_type: str, serialized: bytes) -> Event | None:
try:
kwargs = self.serializer.deserialize(serialized)
except DeserializationError:
self._logger.exception(
"Failed to deserialize an event of type %s",
event_type,
extra={"serialized": serialized},
)
return None
try:
event_class = getattr(_events, event_type)
except AttributeError:
self._logger.error(
"Receive notification for a nonexistent event type: %s",
event_type,
extra={"serialized": serialized},
)
return None
try:
return event_class.unmarshal(self.serializer, kwargs)
except Exception:
self._logger.exception("Error reconstituting event of type %s", event_type)
return None
def reconstitute_event(self, payload: bytes) -> Event | None:
try:
event_type_bytes, serialized = payload.split(b" ", 1)
except ValueError:
self._logger.error(
"Received malformatted notification", extra={"payload": payload}
)
return None
event_type = event_type_bytes.decode("ascii", errors="replace")
return self._reconstitute_event(event_type, serialized)
def reconstitute_event_str(self, payload: str) -> Event | None:
try:
event_type, b64_serialized = payload.split(" ", 1)
except ValueError:
self._logger.error(
"Received malformatted notification", extra={"payload": payload}
)
return None
return self._reconstitute_event(event_type, b64decode(b64_serialized)) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/eventbrokers/base.py | base.py |
from __future__ import annotations
from asyncio import iscoroutinefunction
from concurrent.futures import ThreadPoolExecutor
from contextlib import ExitStack
from threading import Lock
from typing import Any, Callable, Iterable
import attrs
from .._events import Event
from ..abc import EventBroker, Subscription
from .base import BaseEventBroker
@attrs.define(eq=False)
class LocalEventBroker(EventBroker, BaseEventBroker):
"""
Synchronous, local event broker.
This event broker only broadcasts within the process it runs in, and is therefore
not suitable for multi-node or multiprocess use cases.
Does not serialize events.
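    Usage sketch (illustrative only; ``print_event`` is a hypothetical callback)::
        def print_event(event):
            print("received", event)
        broker = LocalEventBroker()
        broker.start()
        subscription = broker.subscribe(print_event)
        ...  # events published via broker.publish() are delivered to the callback
        subscription.unsubscribe()
        broker.stop()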
"""
_executor: ThreadPoolExecutor = attrs.field(init=False)
_exit_stack: ExitStack = attrs.field(init=False)
_subscriptions_lock: Lock = attrs.field(init=False, factory=Lock)
def start(self) -> None:
self._executor = ThreadPoolExecutor(1)
def stop(self, *, force: bool = False) -> None:
self._executor.shutdown(wait=not force)
del self._executor
def subscribe(
self,
callback: Callable[[Event], Any],
event_types: Iterable[type[Event]] | None = None,
*,
one_shot: bool = False,
) -> Subscription:
if iscoroutinefunction(callback):
raise ValueError(
"Coroutine functions are not supported as callbacks on a synchronous "
"event source"
)
with self._subscriptions_lock:
return super().subscribe(callback, event_types, one_shot=one_shot)
def unsubscribe(self, token: object) -> None:
with self._subscriptions_lock:
super().unsubscribe(token)
def publish(self, event: Event) -> None:
self.publish_local(event)
def publish_local(self, event: Event) -> None:
event_type = type(event)
with self._subscriptions_lock:
one_shot_tokens: list[object] = []
for _token, subscription in self._subscriptions.items():
if (
subscription.event_types is None
or event_type in subscription.event_types
):
self._executor.submit(
self._deliver_event, subscription.callback, event
)
if subscription.one_shot:
one_shot_tokens.append(subscription.token)
for token in one_shot_tokens:
super().unsubscribe(token)
def _deliver_event(self, func: Callable[[Event], Any], event: Event) -> None:
try:
func(event)
except BaseException:
self._logger.exception(
"Error delivering %s event", event.__class__.__name__
) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/eventbrokers/local.py | local.py |
from __future__ import annotations
from asyncio import iscoroutine
from typing import Any, Callable
import attrs
from anyio import create_task_group
from anyio.abc import TaskGroup
from .._events import Event
from ..abc import AsyncEventBroker
from .base import BaseEventBroker
@attrs.define(eq=False)
class LocalAsyncEventBroker(AsyncEventBroker, BaseEventBroker):
"""
Asynchronous, local event broker.
This event broker only broadcasts within the process it runs in, and is therefore
not suitable for multi-node or multiprocess use cases.
Does not serialize events.
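    Usage sketch (illustrative only; must run inside an async event loop)::
        async def example():
            broker = LocalAsyncEventBroker()
            await broker.start()
            subscription = broker.subscribe(lambda event: print(event))
            ...  # events published via await broker.publish() reach the callback
            subscription.unsubscribe()
            await broker.stop()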
"""
_task_group: TaskGroup = attrs.field(init=False)
async def start(self) -> None:
self._task_group = create_task_group()
await self._task_group.__aenter__()
async def stop(self, *, force: bool = False) -> None:
await self._task_group.__aexit__(None, None, None)
del self._task_group
async def publish(self, event: Event) -> None:
await self.publish_local(event)
async def publish_local(self, event: Event) -> None:
event_type = type(event)
one_shot_tokens: list[object] = []
for _token, subscription in self._subscriptions.items():
if (
subscription.event_types is None
or event_type in subscription.event_types
):
self._task_group.start_soon(
self._deliver_event, subscription.callback, event
)
if subscription.one_shot:
one_shot_tokens.append(subscription.token)
for token in one_shot_tokens:
super().unsubscribe(token)
async def _deliver_event(self, func: Callable[[Event], Any], event: Event) -> None:
try:
retval = func(event)
if iscoroutine(retval):
await retval
except BaseException:
self._logger.exception(
"Error delivering %s event", event.__class__.__name__
) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/eventbrokers/async_local.py | async_local.py |
from __future__ import annotations
from contextlib import asynccontextmanager
from typing import TYPE_CHECKING, AsyncContextManager, AsyncGenerator, Callable
import attrs
from anyio import TASK_STATUS_IGNORED, CancelScope, sleep
from asyncpg import Connection
from asyncpg.pool import Pool
from .._events import Event
from .._exceptions import SerializationError
from ..abc import Serializer
from ..serializers.json import JSONSerializer
from .async_local import LocalAsyncEventBroker
from .base import DistributedEventBrokerMixin
if TYPE_CHECKING:
from sqlalchemy.ext.asyncio import AsyncEngine
@attrs.define(eq=False)
class AsyncpgEventBroker(LocalAsyncEventBroker, DistributedEventBrokerMixin):
"""
An asynchronous, asyncpg_ based event broker that uses a PostgreSQL server to
broadcast events using its ``NOTIFY`` mechanism.
.. _asyncpg: https://pypi.org/project/asyncpg/
:param connection_factory: a callable that creates an async context manager that
yields a new asyncpg connection
:param serializer: the serializer used to (de)serialize events for transport
:param channel: the ``NOTIFY`` channel to use
:param max_idle_time: maximum time to let the connection go idle, before sending a
``SELECT 1`` query to prevent a connection timeout
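    Usage sketch (illustrative only; assumes a reachable PostgreSQL server, and the
    snippet must run inside an async function)::
        import asyncpg
        pool = await asyncpg.create_pool("postgresql://user:pass@localhost/mydb")
        broker = AsyncpgEventBroker.from_asyncpg_pool(pool)
        await broker.start()
        ...
        await broker.stop()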
"""
connection_factory: Callable[[], AsyncContextManager[Connection]]
serializer: Serializer = attrs.field(kw_only=True, factory=JSONSerializer)
channel: str = attrs.field(kw_only=True, default="apscheduler")
max_idle_time: float = attrs.field(kw_only=True, default=30)
_listen_cancel_scope: CancelScope = attrs.field(init=False)
@classmethod
def from_asyncpg_pool(cls, pool: Pool, **kwargs) -> AsyncpgEventBroker:
"""
Create a new asyncpg event broker from an existing asyncpg connection pool.
:param pool: an asyncpg connection pool
:param kwargs: keyword arguments to pass to the initializer of this class
:return: the newly created event broker
"""
return cls(pool.acquire, **kwargs)
@classmethod
def from_async_sqla_engine(
cls, engine: AsyncEngine, **kwargs
) -> AsyncpgEventBroker:
"""
Create a new asyncpg event broker from an SQLAlchemy engine.
:param engine: an asynchronous SQLAlchemy engine using asyncpg as the driver
:type engine: ~sqlalchemy.ext.asyncio.AsyncEngine
:param kwargs: keyword arguments to pass to the initializer of this class
:return: the newly created event broker
"""
if engine.dialect.driver != "asyncpg":
raise ValueError(
f'The driver in the engine must be "asyncpg" (current: '
f"{engine.dialect.driver})"
)
@asynccontextmanager
async def connection_factory() -> AsyncGenerator[Connection, None]:
conn = await engine.raw_connection()
try:
yield conn.connection._connection
finally:
conn.close()
return cls(connection_factory, **kwargs)
async def start(self) -> None:
await super().start()
self._listen_cancel_scope = await self._task_group.start(
self._listen_notifications
)
async def stop(self, *, force: bool = False) -> None:
self._listen_cancel_scope.cancel()
await super().stop(force=force)
async def _listen_notifications(self, *, task_status=TASK_STATUS_IGNORED) -> None:
def callback(connection, pid, channel: str, payload: str) -> None:
event = self.reconstitute_event_str(payload)
if event is not None:
self._task_group.start_soon(self.publish_local, event)
task_started_sent = False
with CancelScope() as cancel_scope:
while True:
async with self.connection_factory() as conn:
await conn.add_listener(self.channel, callback)
if not task_started_sent:
task_status.started(cancel_scope)
task_started_sent = True
try:
while True:
await sleep(self.max_idle_time)
await conn.execute("SELECT 1")
finally:
await conn.remove_listener(self.channel, callback)
async def publish(self, event: Event) -> None:
notification = self.generate_notification_str(event)
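        # PostgreSQL limits NOTIFY payloads to just under 8000 bytes by default,
        # hence the size check below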
if len(notification) > 7999:
raise SerializationError(
"Serialized event object exceeds 7999 bytes in size"
)
async with self.connection_factory() as conn:
await conn.execute("SELECT pg_notify($1, $2)", self.channel, notification)
return | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/eventbrokers/asyncpg.py | asyncpg.py |
from __future__ import annotations
from concurrent.futures import Future
from typing import Any
import attrs
from paho.mqtt.client import Client, MQTTMessage
from paho.mqtt.properties import Properties
from paho.mqtt.reasoncodes import ReasonCodes
from .._events import Event
from ..abc import Serializer
from ..serializers.json import JSONSerializer
from .base import DistributedEventBrokerMixin
from .local import LocalEventBroker
@attrs.define(eq=False)
class MQTTEventBroker(LocalEventBroker, DistributedEventBrokerMixin):
"""
An event broker that uses an MQTT (v3.1 or v5) broker to broadcast events.
Requires the paho-mqtt_ library to be installed.
.. _paho-mqtt: https://pypi.org/project/paho-mqtt/
:param client: a paho-mqtt client
:param serializer: the serializer used to (de)serialize events for transport
:param host: host name or IP address to connect to
:param port: TCP port number to connect to
:param topic: topic on which to send the messages
    :param subscribe_qos: MQTT QoS to use when subscribing to messages
:param publish_qos: MQTT QoS to use for publishing messages
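    Usage sketch (illustrative only; assumes an MQTT broker such as Mosquitto
    listening on ``localhost:1883``)::
        from paho.mqtt.client import Client
        broker = MQTTEventBroker(Client())
        broker.start()
        ...
        broker.stop()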
"""
client: Client
serializer: Serializer = attrs.field(factory=JSONSerializer)
host: str = attrs.field(kw_only=True, default="localhost")
port: int = attrs.field(kw_only=True, default=1883)
topic: str = attrs.field(kw_only=True, default="apscheduler")
subscribe_qos: int = attrs.field(kw_only=True, default=0)
publish_qos: int = attrs.field(kw_only=True, default=0)
_ready_future: Future[None] = attrs.field(init=False)
def start(self) -> None:
super().start()
self._ready_future = Future()
self.client.enable_logger(self._logger)
self.client.on_connect = self._on_connect
self.client.on_message = self._on_message
self.client.on_subscribe = self._on_subscribe
self.client.connect(self.host, self.port)
self.client.loop_start()
self._ready_future.result(10)
def stop(self, *, force: bool = False) -> None:
self.client.disconnect()
self.client.loop_stop(force=force)
        super().stop(force=force)
def _on_connect(
self,
client: Client,
userdata: Any,
flags: dict[str, Any],
rc: ReasonCodes | int,
properties: Properties | None = None,
) -> None:
try:
client.subscribe(self.topic, qos=self.subscribe_qos)
except Exception as exc:
self._ready_future.set_exception(exc)
raise
def _on_subscribe(
self, client: Client, userdata: Any, mid, granted_qos: list[int]
) -> None:
self._ready_future.set_result(None)
def _on_message(self, client: Client, userdata: Any, msg: MQTTMessage) -> None:
event = self.reconstitute_event(msg.payload)
if event is not None:
self.publish_local(event)
def publish(self, event: Event) -> None:
notification = self.generate_notification(event)
self.client.publish(self.topic, notification, qos=self.publish_qos) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/eventbrokers/mqtt.py | mqtt.py |
from __future__ import annotations
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from typing import Any, Iterable
from uuid import UUID
import anyio
import attrs
import sniffio
import tenacity
from sqlalchemy import and_, bindparam, or_, select
from sqlalchemy.engine import URL, Result
from sqlalchemy.exc import IntegrityError, InterfaceError
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio.engine import AsyncEngine
from sqlalchemy.sql.ddl import DropTable
from sqlalchemy.sql.elements import BindParameter
from .._enums import ConflictPolicy
from .._events import (
DataStoreEvent,
JobAcquired,
JobAdded,
JobDeserializationFailed,
ScheduleAdded,
ScheduleDeserializationFailed,
ScheduleRemoved,
ScheduleUpdated,
TaskAdded,
TaskRemoved,
TaskUpdated,
)
from .._exceptions import ConflictingIdError, SerializationError, TaskLookupError
from .._structures import Job, JobResult, Schedule, Task
from ..abc import AsyncEventBroker
from ..marshalling import callable_to_ref
from .base import BaseAsyncDataStore
from .sqlalchemy import _BaseSQLAlchemyDataStore
@attrs.define(eq=False)
class AsyncSQLAlchemyDataStore(_BaseSQLAlchemyDataStore, BaseAsyncDataStore):
"""
Uses a relational database to store data.
When started, this data store creates the appropriate tables on the given database
if they're not already present.
    Operations are retried (in accordance with ``retry_settings``) when an operation
raises :exc:`sqlalchemy.OperationalError`.
This store has been tested to work with PostgreSQL (asyncpg driver) and MySQL
(asyncmy driver).
:param engine: an asynchronous SQLAlchemy engine
:param schema: a database schema name to use, if not the default
:param serializer: the serializer used to (de)serialize tasks, schedules and jobs
for storage
:param lock_expiration_delay: maximum amount of time (in seconds) that a scheduler
or worker can keep a lock on a schedule or task
:param retry_settings: Tenacity settings for retrying operations in case of a
        database connectivity problem
:param start_from_scratch: erase all existing data during startup (useful for test
suites)
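    Usage sketch (illustrative only; assumes a PostgreSQL database reachable
    through the asyncpg driver, and that the store is handed to a scheduler or
    worker which then calls ``start()`` on it)::
        data_store = AsyncSQLAlchemyDataStore.from_url(
            "postgresql+asyncpg://user:pass@localhost/mydb"
        )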
"""
engine: AsyncEngine
@classmethod
def from_url(cls, url: str | URL, **options) -> AsyncSQLAlchemyDataStore:
"""
Create a new asynchronous SQLAlchemy data store.
        :param url: an SQLAlchemy URL to pass to
            :func:`~sqlalchemy.ext.asyncio.create_async_engine` (must use an async
            dialect like ``asyncpg`` or ``asyncmy``)
        :param options: keyword arguments to pass to the initializer of this class
:return: the newly created data store
"""
engine = create_async_engine(url, future=True)
return cls(engine, **options)
def _retry(self) -> tenacity.AsyncRetrying:
# OSError is raised by asyncpg if it can't connect
return tenacity.AsyncRetrying(
stop=self.retry_settings.stop,
wait=self.retry_settings.wait,
retry=tenacity.retry_if_exception_type((InterfaceError, OSError)),
after=self._after_attempt,
sleep=anyio.sleep,
reraise=True,
)
async def start(self, event_broker: AsyncEventBroker) -> None:
await super().start(event_broker)
asynclib = sniffio.current_async_library() or "(unknown)"
if asynclib != "asyncio":
raise RuntimeError(
f"This data store requires asyncio; currently running: {asynclib}"
)
# Verify that the schema is in place
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
if self.start_from_scratch:
for table in self._metadata.sorted_tables:
await conn.execute(DropTable(table, if_exists=True))
await conn.run_sync(self._metadata.create_all)
query = select(self.t_metadata.c.schema_version)
result = await conn.execute(query)
version = result.scalar()
if version is None:
await conn.execute(
self.t_metadata.insert(values={"schema_version": 1})
)
elif version > 1:
raise RuntimeError(
f"Unexpected schema version ({version}); "
f"only version 1 is supported by this version of "
f"APScheduler"
)
async def _deserialize_schedules(self, result: Result) -> list[Schedule]:
schedules: list[Schedule] = []
for row in result:
try:
schedules.append(Schedule.unmarshal(self.serializer, row._asdict()))
except SerializationError as exc:
await self._events.publish(
ScheduleDeserializationFailed(schedule_id=row["id"], exception=exc)
)
return schedules
async def _deserialize_jobs(self, result: Result) -> list[Job]:
jobs: list[Job] = []
for row in result:
try:
jobs.append(Job.unmarshal(self.serializer, row._asdict()))
except SerializationError as exc:
await self._events.publish(
JobDeserializationFailed(job_id=row["id"], exception=exc)
)
return jobs
async def add_task(self, task: Task) -> None:
insert = self.t_tasks.insert().values(
id=task.id,
func=callable_to_ref(task.func),
max_running_jobs=task.max_running_jobs,
misfire_grace_time=task.misfire_grace_time,
)
try:
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
await conn.execute(insert)
except IntegrityError:
update = (
self.t_tasks.update()
.values(
func=callable_to_ref(task.func),
max_running_jobs=task.max_running_jobs,
misfire_grace_time=task.misfire_grace_time,
)
.where(self.t_tasks.c.id == task.id)
)
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
await conn.execute(update)
await self._events.publish(TaskUpdated(task_id=task.id))
else:
await self._events.publish(TaskAdded(task_id=task.id))
async def remove_task(self, task_id: str) -> None:
delete = self.t_tasks.delete().where(self.t_tasks.c.id == task_id)
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
result = await conn.execute(delete)
if result.rowcount == 0:
raise TaskLookupError(task_id)
else:
await self._events.publish(TaskRemoved(task_id=task_id))
async def get_task(self, task_id: str) -> Task:
query = select(
[
self.t_tasks.c.id,
self.t_tasks.c.func,
self.t_tasks.c.max_running_jobs,
self.t_tasks.c.state,
self.t_tasks.c.misfire_grace_time,
]
).where(self.t_tasks.c.id == task_id)
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
result = await conn.execute(query)
row = result.first()
if row:
return Task.unmarshal(self.serializer, row._asdict())
else:
raise TaskLookupError(task_id)
async def get_tasks(self) -> list[Task]:
query = select(
[
self.t_tasks.c.id,
self.t_tasks.c.func,
self.t_tasks.c.max_running_jobs,
self.t_tasks.c.state,
self.t_tasks.c.misfire_grace_time,
]
).order_by(self.t_tasks.c.id)
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
result = await conn.execute(query)
tasks = [
Task.unmarshal(self.serializer, row._asdict()) for row in result
]
return tasks
async def add_schedule(
self, schedule: Schedule, conflict_policy: ConflictPolicy
) -> None:
event: DataStoreEvent
values = schedule.marshal(self.serializer)
insert = self.t_schedules.insert().values(**values)
try:
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
await conn.execute(insert)
except IntegrityError:
if conflict_policy is ConflictPolicy.exception:
raise ConflictingIdError(schedule.id) from None
elif conflict_policy is ConflictPolicy.replace:
del values["id"]
update = (
self.t_schedules.update()
.where(self.t_schedules.c.id == schedule.id)
.values(**values)
)
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
await conn.execute(update)
event = ScheduleUpdated(
schedule_id=schedule.id, next_fire_time=schedule.next_fire_time
)
await self._events.publish(event)
else:
event = ScheduleAdded(
schedule_id=schedule.id, next_fire_time=schedule.next_fire_time
)
await self._events.publish(event)
async def remove_schedules(self, ids: Iterable[str]) -> None:
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
delete = self.t_schedules.delete().where(
self.t_schedules.c.id.in_(ids)
)
if self._supports_update_returning:
delete = delete.returning(self.t_schedules.c.id)
removed_ids: Iterable[str] = [
row[0] for row in await conn.execute(delete)
]
else:
# TODO: actually check which rows were deleted?
await conn.execute(delete)
removed_ids = ids
for schedule_id in removed_ids:
await self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
async def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
query = self.t_schedules.select().order_by(self.t_schedules.c.id)
if ids:
query = query.where(self.t_schedules.c.id.in_(ids))
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
result = await conn.execute(query)
return await self._deserialize_schedules(result)
async def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]:
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
now = datetime.now(timezone.utc)
acquired_until = now + timedelta(seconds=self.lock_expiration_delay)
schedules_cte = (
select(self.t_schedules.c.id)
.where(
and_(
self.t_schedules.c.next_fire_time.isnot(None),
self.t_schedules.c.next_fire_time <= now,
or_(
self.t_schedules.c.acquired_until.is_(None),
self.t_schedules.c.acquired_until < now,
),
)
)
.order_by(self.t_schedules.c.next_fire_time)
.limit(limit)
.with_for_update(skip_locked=True)
.cte()
)
subselect = select([schedules_cte.c.id])
update = (
self.t_schedules.update()
.where(self.t_schedules.c.id.in_(subselect))
.values(acquired_by=scheduler_id, acquired_until=acquired_until)
)
if self._supports_update_returning:
update = update.returning(*self.t_schedules.columns)
result = await conn.execute(update)
else:
await conn.execute(update)
query = self.t_schedules.select().where(
and_(self.t_schedules.c.acquired_by == scheduler_id)
)
result = await conn.execute(query)
schedules = await self._deserialize_schedules(result)
return schedules
async def release_schedules(
self, scheduler_id: str, schedules: list[Schedule]
) -> None:
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
update_events: list[ScheduleUpdated] = []
finished_schedule_ids: list[str] = []
update_args: list[dict[str, Any]] = []
for schedule in schedules:
if schedule.next_fire_time is not None:
try:
serialized_trigger = self.serializer.serialize(
schedule.trigger
)
except SerializationError:
self._logger.exception(
"Error serializing trigger for schedule %r – "
"removing from data store",
schedule.id,
)
finished_schedule_ids.append(schedule.id)
continue
update_args.append(
{
"p_id": schedule.id,
"p_trigger": serialized_trigger,
"p_next_fire_time": schedule.next_fire_time,
}
)
else:
finished_schedule_ids.append(schedule.id)
# Update schedules that have a next fire time
if update_args:
p_id: BindParameter = bindparam("p_id")
p_trigger: BindParameter = bindparam("p_trigger")
p_next_fire_time: BindParameter = bindparam("p_next_fire_time")
update = (
self.t_schedules.update()
.where(
and_(
self.t_schedules.c.id == p_id,
self.t_schedules.c.acquired_by == scheduler_id,
)
)
.values(
trigger=p_trigger,
next_fire_time=p_next_fire_time,
acquired_by=None,
acquired_until=None,
)
)
next_fire_times = {
arg["p_id"]: arg["p_next_fire_time"] for arg in update_args
}
# TODO: actually check which rows were updated?
await conn.execute(update, update_args)
updated_ids = list(next_fire_times)
for schedule_id in updated_ids:
event = ScheduleUpdated(
schedule_id=schedule_id,
next_fire_time=next_fire_times[schedule_id],
)
update_events.append(event)
# Remove schedules that have no next fire time or failed to
# serialize
if finished_schedule_ids:
delete = self.t_schedules.delete().where(
self.t_schedules.c.id.in_(finished_schedule_ids)
)
await conn.execute(delete)
for event in update_events:
await self._events.publish(event)
for schedule_id in finished_schedule_ids:
await self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
async def get_next_schedule_run_time(self) -> datetime | None:
        statement = (
select(self.t_schedules.c.next_fire_time)
.where(self.t_schedules.c.next_fire_time.isnot(None))
.order_by(self.t_schedules.c.next_fire_time)
.limit(1)
)
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
                    result = await conn.execute(statement)
return result.scalar()
async def add_job(self, job: Job) -> None:
marshalled = job.marshal(self.serializer)
insert = self.t_jobs.insert().values(**marshalled)
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
await conn.execute(insert)
event = JobAdded(
job_id=job.id,
task_id=job.task_id,
schedule_id=job.schedule_id,
tags=job.tags,
)
await self._events.publish(event)
async def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
query = self.t_jobs.select().order_by(self.t_jobs.c.id)
if ids:
job_ids = [job_id for job_id in ids]
query = query.where(self.t_jobs.c.id.in_(job_ids))
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
result = await conn.execute(query)
return await self._deserialize_jobs(result)
async def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
now = datetime.now(timezone.utc)
acquired_until = now + timedelta(seconds=self.lock_expiration_delay)
query = (
self.t_jobs.select()
.join(self.t_tasks, self.t_tasks.c.id == self.t_jobs.c.task_id)
.where(
or_(
self.t_jobs.c.acquired_until.is_(None),
self.t_jobs.c.acquired_until < now,
)
)
.order_by(self.t_jobs.c.created_at)
.with_for_update(skip_locked=True)
.limit(limit)
)
result = await conn.execute(query)
if not result:
return []
# Mark the jobs as acquired by this worker
jobs = await self._deserialize_jobs(result)
task_ids: set[str] = {job.task_id for job in jobs}
# Retrieve the limits
query = select(
[
self.t_tasks.c.id,
self.t_tasks.c.max_running_jobs
- self.t_tasks.c.running_jobs,
]
).where(
self.t_tasks.c.max_running_jobs.isnot(None),
self.t_tasks.c.id.in_(task_ids),
)
result = await conn.execute(query)
job_slots_left: dict[str, int] = dict(result.fetchall())
# Filter out jobs that don't have free slots
acquired_jobs: list[Job] = []
increments: dict[str, int] = defaultdict(lambda: 0)
for job in jobs:
# Don't acquire the job if there are no free slots left
slots_left = job_slots_left.get(job.task_id)
if slots_left == 0:
continue
elif slots_left is not None:
job_slots_left[job.task_id] -= 1
acquired_jobs.append(job)
increments[job.task_id] += 1
if acquired_jobs:
# Mark the acquired jobs as acquired by this worker
acquired_job_ids = [job.id for job in acquired_jobs]
update = (
self.t_jobs.update()
.values(
acquired_by=worker_id, acquired_until=acquired_until
)
.where(self.t_jobs.c.id.in_(acquired_job_ids))
)
await conn.execute(update)
# Increment the running job counters on each task
p_id: BindParameter = bindparam("p_id")
p_increment: BindParameter = bindparam("p_increment")
params = [
{"p_id": task_id, "p_increment": increment}
for task_id, increment in increments.items()
]
update = (
self.t_tasks.update()
.values(
running_jobs=self.t_tasks.c.running_jobs + p_increment
)
.where(self.t_tasks.c.id == p_id)
)
await conn.execute(update, params)
# Publish the appropriate events
for job in acquired_jobs:
await self._events.publish(JobAcquired(job_id=job.id, worker_id=worker_id))
return acquired_jobs
async def release_job(
self, worker_id: str, task_id: str, result: JobResult
) -> None:
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
# Record the job result
if result.expires_at > result.finished_at:
marshalled = result.marshal(self.serializer)
insert = self.t_job_results.insert().values(**marshalled)
await conn.execute(insert)
# Decrement the number of running jobs for this task
update = (
self.t_tasks.update()
.values(running_jobs=self.t_tasks.c.running_jobs - 1)
.where(self.t_tasks.c.id == task_id)
)
await conn.execute(update)
# Delete the job
delete = self.t_jobs.delete().where(
self.t_jobs.c.id == result.job_id
)
await conn.execute(delete)
async def get_job_result(self, job_id: UUID) -> JobResult | None:
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
# Retrieve the result
query = self.t_job_results.select().where(
self.t_job_results.c.job_id == job_id
)
row = (await conn.execute(query)).first()
# Delete the result
delete = self.t_job_results.delete().where(
self.t_job_results.c.job_id == job_id
)
await conn.execute(delete)
return (
JobResult.unmarshal(self.serializer, row._asdict())
if row
else None
) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/datastores/async_sqlalchemy.py | async_sqlalchemy.py |
from __future__ import annotations
from bisect import bisect_left, bisect_right, insort_right
from collections import defaultdict
from datetime import MAXYEAR, datetime, timedelta, timezone
from functools import partial
from typing import Any, Iterable
from uuid import UUID
import attrs
from .._enums import ConflictPolicy
from .._events import (
JobAcquired,
JobAdded,
ScheduleAdded,
ScheduleRemoved,
ScheduleUpdated,
TaskAdded,
TaskRemoved,
TaskUpdated,
)
from .._exceptions import ConflictingIdError, TaskLookupError
from .._structures import Job, JobResult, Schedule, Task
from .base import BaseDataStore
max_datetime = datetime(MAXYEAR, 12, 31, 23, 59, 59, 999999, tzinfo=timezone.utc)
@attrs.define
class TaskState:
task: Task
running_jobs: int = 0
saved_state: Any = None
def __eq__(self, other):
return self.task.id == other.task.id
@attrs.define
class ScheduleState:
schedule: Schedule
next_fire_time: datetime | None = attrs.field(init=False, eq=False)
acquired_by: str | None = attrs.field(init=False, eq=False, default=None)
acquired_until: datetime | None = attrs.field(init=False, eq=False, default=None)
def __attrs_post_init__(self):
self.next_fire_time = self.schedule.next_fire_time
def __eq__(self, other):
return self.schedule.id == other.schedule.id
def __lt__(self, other):
if self.next_fire_time is None:
return False
elif other.next_fire_time is None:
return self.next_fire_time is not None
elif self.next_fire_time != other.next_fire_time:
return self.next_fire_time < other.next_fire_time
else:
return self.schedule.id < other.schedule.id
def __hash__(self):
return hash(self.schedule.id)
@attrs.define(order=True)
class JobState:
job: Job = attrs.field(order=False)
created_at: datetime = attrs.field(
init=False, factory=partial(datetime.now, timezone.utc)
)
acquired_by: str | None = attrs.field(eq=False, order=False, default=None)
acquired_until: datetime | None = attrs.field(eq=False, order=False, default=None)
def __eq__(self, other):
return self.job.id == other.job.id
def __hash__(self):
return hash(self.job.id)
@attrs.define(eq=False)
class MemoryDataStore(BaseDataStore):
"""
Stores scheduler data in memory, without serializing it.
Can be shared between multiple schedulers and workers within the same event loop.
:param lock_expiration_delay: maximum amount of time (in seconds) that a scheduler
or worker can keep a lock on a schedule or task
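    Usage sketch (illustrative only)::
        data_store = MemoryDataStore(lock_expiration_delay=60)
        # pass the store to a scheduler and/or worker running in the same process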
"""
lock_expiration_delay: float = 30
_tasks: dict[str, TaskState] = attrs.Factory(dict)
_schedules: list[ScheduleState] = attrs.Factory(list)
_schedules_by_id: dict[str, ScheduleState] = attrs.Factory(dict)
_schedules_by_task_id: dict[str, set[ScheduleState]] = attrs.Factory(
partial(defaultdict, set)
)
_jobs: list[JobState] = attrs.Factory(list)
_jobs_by_id: dict[UUID, JobState] = attrs.Factory(dict)
_jobs_by_task_id: dict[str, set[JobState]] = attrs.Factory(
partial(defaultdict, set)
)
_job_results: dict[UUID, JobResult] = attrs.Factory(dict)
    def _find_schedule_index(self, state: ScheduleState) -> int | None:
        # Use bisect_right for the upper bound so that all entries comparing equal
        # in sort order fall within the search window
        left_index = bisect_left(self._schedules, state)
        right_index = bisect_right(self._schedules, state)
        return self._schedules.index(state, left_index, right_index + 1)
    def _find_job_index(self, state: JobState) -> int | None:
        # Same as above, but for jobs that share the same created_at timestamp
        left_index = bisect_left(self._jobs, state)
        right_index = bisect_right(self._jobs, state)
        return self._jobs.index(state, left_index, right_index + 1)
def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
return [
state.schedule
for state in self._schedules
if ids is None or state.schedule.id in ids
]
def add_task(self, task: Task) -> None:
task_exists = task.id in self._tasks
self._tasks[task.id] = TaskState(task)
if task_exists:
self._events.publish(TaskUpdated(task_id=task.id))
else:
self._events.publish(TaskAdded(task_id=task.id))
def remove_task(self, task_id: str) -> None:
try:
del self._tasks[task_id]
except KeyError:
raise TaskLookupError(task_id) from None
self._events.publish(TaskRemoved(task_id=task_id))
def get_task(self, task_id: str) -> Task:
try:
return self._tasks[task_id].task
except KeyError:
raise TaskLookupError(task_id) from None
def get_tasks(self) -> list[Task]:
return sorted(
(state.task for state in self._tasks.values()), key=lambda task: task.id
)
def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None:
old_state = self._schedules_by_id.get(schedule.id)
if old_state is not None:
if conflict_policy is ConflictPolicy.do_nothing:
return
elif conflict_policy is ConflictPolicy.exception:
raise ConflictingIdError(schedule.id)
index = self._find_schedule_index(old_state)
del self._schedules[index]
self._schedules_by_task_id[old_state.schedule.task_id].remove(old_state)
state = ScheduleState(schedule)
self._schedules_by_id[schedule.id] = state
self._schedules_by_task_id[schedule.task_id].add(state)
insort_right(self._schedules, state)
if old_state is not None:
event = ScheduleUpdated(
schedule_id=schedule.id, next_fire_time=schedule.next_fire_time
)
else:
event = ScheduleAdded(
schedule_id=schedule.id, next_fire_time=schedule.next_fire_time
)
self._events.publish(event)
def remove_schedules(self, ids: Iterable[str]) -> None:
for schedule_id in ids:
state = self._schedules_by_id.pop(schedule_id, None)
if state:
self._schedules.remove(state)
event = ScheduleRemoved(schedule_id=state.schedule.id)
self._events.publish(event)
def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]:
now = datetime.now(timezone.utc)
schedules: list[Schedule] = []
for state in self._schedules:
if state.next_fire_time is None or state.next_fire_time > now:
# The schedule is either paused or not yet due
break
elif state.acquired_by is not None:
if state.acquired_by != scheduler_id and now <= state.acquired_until:
# The schedule has been acquired by another scheduler and the
# timeout has not expired yet
continue
schedules.append(state.schedule)
state.acquired_by = scheduler_id
state.acquired_until = now + timedelta(seconds=self.lock_expiration_delay)
if len(schedules) == limit:
break
return schedules
def release_schedules(self, scheduler_id: str, schedules: list[Schedule]) -> None:
# Send update events for schedules that have a next time
finished_schedule_ids: list[str] = []
for s in schedules:
if s.next_fire_time is not None:
# Remove the schedule
schedule_state = self._schedules_by_id.get(s.id)
index = self._find_schedule_index(schedule_state)
del self._schedules[index]
# Re-add the schedule to its new position
schedule_state.next_fire_time = s.next_fire_time
schedule_state.acquired_by = None
schedule_state.acquired_until = None
insort_right(self._schedules, schedule_state)
event = ScheduleUpdated(
schedule_id=s.id, next_fire_time=s.next_fire_time
)
self._events.publish(event)
else:
finished_schedule_ids.append(s.id)
# Remove schedules that didn't get a new next fire time
self.remove_schedules(finished_schedule_ids)
def get_next_schedule_run_time(self) -> datetime | None:
return self._schedules[0].next_fire_time if self._schedules else None
def add_job(self, job: Job) -> None:
state = JobState(job)
self._jobs.append(state)
self._jobs_by_id[job.id] = state
self._jobs_by_task_id[job.task_id].add(state)
event = JobAdded(
job_id=job.id,
task_id=job.task_id,
schedule_id=job.schedule_id,
tags=job.tags,
)
self._events.publish(event)
def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
if ids is not None:
ids = frozenset(ids)
return [state.job for state in self._jobs if ids is None or state.job.id in ids]
def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
now = datetime.now(timezone.utc)
jobs: list[Job] = []
for _index, job_state in enumerate(self._jobs):
task_state = self._tasks[job_state.job.task_id]
# Skip already acquired jobs (unless the acquisition lock has expired)
if job_state.acquired_by is not None:
if job_state.acquired_until >= now:
continue
else:
task_state.running_jobs -= 1
# Check if the task allows one more job to be started
if (
task_state.task.max_running_jobs is not None
and task_state.running_jobs >= task_state.task.max_running_jobs
):
continue
# Mark the job as acquired by this worker
jobs.append(job_state.job)
job_state.acquired_by = worker_id
job_state.acquired_until = now + timedelta(
seconds=self.lock_expiration_delay
)
# Increment the number of running jobs for this task
task_state.running_jobs += 1
# Exit the loop if enough jobs have been acquired
if len(jobs) == limit:
break
# Publish the appropriate events
for job in jobs:
self._events.publish(JobAcquired(job_id=job.id, worker_id=worker_id))
return jobs
def release_job(self, worker_id: str, task_id: str, result: JobResult) -> None:
# Record the job result
if result.expires_at > result.finished_at:
self._job_results[result.job_id] = result
# Decrement the number of running jobs for this task
task_state = self._tasks.get(task_id)
if task_state is not None:
task_state.running_jobs -= 1
# Delete the job
job_state = self._jobs_by_id.pop(result.job_id)
self._jobs_by_task_id[task_id].remove(job_state)
index = self._find_job_index(job_state)
del self._jobs[index]
def get_job_result(self, job_id: UUID) -> JobResult | None:
return self._job_results.pop(job_id, None) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/datastores/memory.py | memory.py |
from __future__ import annotations
import operator
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from logging import Logger, getLogger
from typing import Any, Callable, ClassVar, Iterable
from uuid import UUID
import attrs
import pymongo
import tenacity
from attrs.validators import instance_of
from bson import CodecOptions, UuidRepresentation
from bson.codec_options import TypeEncoder, TypeRegistry
from pymongo import ASCENDING, DeleteOne, MongoClient, UpdateOne
from pymongo.collection import Collection
from pymongo.errors import ConnectionFailure, DuplicateKeyError
from .._enums import CoalescePolicy, ConflictPolicy, JobOutcome
from .._events import (
DataStoreEvent,
JobAcquired,
JobAdded,
ScheduleAdded,
ScheduleRemoved,
ScheduleUpdated,
TaskAdded,
TaskRemoved,
TaskUpdated,
)
from .._exceptions import (
ConflictingIdError,
DeserializationError,
SerializationError,
TaskLookupError,
)
from .._structures import Job, JobResult, RetrySettings, Schedule, Task
from ..abc import EventBroker, Serializer
from ..serializers.pickle import PickleSerializer
from .base import BaseDataStore
class CustomEncoder(TypeEncoder):
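    """BSON type encoder that converts values of the given Python type using the supplied callable."""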
def __init__(self, python_type: type, encoder: Callable):
self._python_type = python_type
self._encoder = encoder
@property
def python_type(self) -> type:
return self._python_type
def transform_python(self, value: Any) -> Any:
return self._encoder(value)
@attrs.define(eq=False)
class MongoDBDataStore(BaseDataStore):
"""
Uses a MongoDB server to store data.
When started, this data store creates the appropriate indexes on the given database
if they're not already present.
    Operations are retried (in accordance with ``retry_settings``) when an operation
raises :exc:`pymongo.errors.ConnectionFailure`.
:param client: a PyMongo client
:param serializer: the serializer used to (de)serialize tasks, schedules and jobs
for storage
:param database: name of the database to use
:param lock_expiration_delay: maximum amount of time (in seconds) that a scheduler
or worker can keep a lock on a schedule or task
:param retry_settings: Tenacity settings for retrying operations in case of a
        database connectivity problem
:param start_from_scratch: erase all existing data during startup (useful for test
suites)
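    Usage sketch (illustrative only; assumes a MongoDB 4.0+ server on
    ``localhost:27017``)::
        data_store = MongoDBDataStore.from_url("mongodb://localhost:27017")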
"""
client: MongoClient = attrs.field(validator=instance_of(MongoClient))
serializer: Serializer = attrs.field(factory=PickleSerializer, kw_only=True)
database: str = attrs.field(default="apscheduler", kw_only=True)
lock_expiration_delay: float = attrs.field(default=30, kw_only=True)
retry_settings: RetrySettings = attrs.field(default=RetrySettings(), kw_only=True)
start_from_scratch: bool = attrs.field(default=False, kw_only=True)
_task_attrs: ClassVar[list[str]] = [field.name for field in attrs.fields(Task)]
_schedule_attrs: ClassVar[list[str]] = [
field.name for field in attrs.fields(Schedule)
]
_job_attrs: ClassVar[list[str]] = [field.name for field in attrs.fields(Job)]
_logger: Logger = attrs.field(init=False, factory=lambda: getLogger(__name__))
_local_tasks: dict[str, Task] = attrs.field(init=False, factory=dict)
def __attrs_post_init__(self) -> None:
type_registry = TypeRegistry(
[
CustomEncoder(timedelta, timedelta.total_seconds),
CustomEncoder(ConflictPolicy, operator.attrgetter("name")),
CustomEncoder(CoalescePolicy, operator.attrgetter("name")),
CustomEncoder(JobOutcome, operator.attrgetter("name")),
]
)
codec_options = CodecOptions(
tz_aware=True,
type_registry=type_registry,
uuid_representation=UuidRepresentation.STANDARD,
)
database = self.client.get_database(self.database, codec_options=codec_options)
self._tasks: Collection = database["tasks"]
self._schedules: Collection = database["schedules"]
self._jobs: Collection = database["jobs"]
self._jobs_results: Collection = database["job_results"]
@classmethod
def from_url(cls, uri: str, **options) -> MongoDBDataStore:
client = MongoClient(uri)
return cls(client, **options)
def _retry(self) -> tenacity.Retrying:
return tenacity.Retrying(
stop=self.retry_settings.stop,
wait=self.retry_settings.wait,
retry=tenacity.retry_if_exception_type(ConnectionFailure),
after=self._after_attempt,
reraise=True,
)
def _after_attempt(self, retry_state: tenacity.RetryCallState) -> None:
self._logger.warning(
"Temporary data store error (attempt %d): %s",
retry_state.attempt_number,
retry_state.outcome.exception(),
)
def start(self, event_broker: EventBroker) -> None:
super().start(event_broker)
server_info = self.client.server_info()
if server_info["versionArray"] < [4, 0]:
raise RuntimeError(
f"MongoDB server must be at least v4.0; current version = "
f"{server_info['version']}"
)
for attempt in self._retry():
with attempt, self.client.start_session() as session:
if self.start_from_scratch:
self._tasks.delete_many({}, session=session)
self._schedules.delete_many({}, session=session)
self._jobs.delete_many({}, session=session)
self._jobs_results.delete_many({}, session=session)
self._schedules.create_index("next_fire_time", session=session)
self._jobs.create_index("task_id", session=session)
self._jobs.create_index("created_at", session=session)
self._jobs.create_index("tags", session=session)
self._jobs_results.create_index("finished_at", session=session)
self._jobs_results.create_index("expires_at", session=session)
def add_task(self, task: Task) -> None:
for attempt in self._retry():
with attempt:
previous = self._tasks.find_one_and_update(
{"_id": task.id},
{
"$set": task.marshal(self.serializer),
"$setOnInsert": {"running_jobs": 0},
},
upsert=True,
)
self._local_tasks[task.id] = task
if previous:
self._events.publish(TaskUpdated(task_id=task.id))
else:
self._events.publish(TaskAdded(task_id=task.id))
def remove_task(self, task_id: str) -> None:
for attempt in self._retry():
with attempt:
if not self._tasks.find_one_and_delete({"_id": task_id}):
raise TaskLookupError(task_id)
del self._local_tasks[task_id]
self._events.publish(TaskRemoved(task_id=task_id))
def get_task(self, task_id: str) -> Task:
try:
return self._local_tasks[task_id]
except KeyError:
for attempt in self._retry():
with attempt:
document = self._tasks.find_one(
{"_id": task_id}, projection=self._task_attrs
)
if not document:
raise TaskLookupError(task_id)
document["id"] = document.pop("id")
task = self._local_tasks[task_id] = Task.unmarshal(
self.serializer, document
)
return task
def get_tasks(self) -> list[Task]:
for attempt in self._retry():
with attempt:
tasks: list[Task] = []
for document in self._tasks.find(
projection=self._task_attrs, sort=[("_id", pymongo.ASCENDING)]
):
document["id"] = document.pop("_id")
tasks.append(Task.unmarshal(self.serializer, document))
return tasks
def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
filters = {"_id": {"$in": list(ids)}} if ids is not None else {}
for attempt in self._retry():
with attempt:
schedules: list[Schedule] = []
cursor = self._schedules.find(filters).sort("_id")
for document in cursor:
document["id"] = document.pop("_id")
try:
schedule = Schedule.unmarshal(self.serializer, document)
except DeserializationError:
self._logger.warning(
"Failed to deserialize schedule %r", document["_id"]
)
continue
schedules.append(schedule)
return schedules
def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None:
event: DataStoreEvent
document = schedule.marshal(self.serializer)
document["_id"] = document.pop("id")
try:
for attempt in self._retry():
with attempt:
self._schedules.insert_one(document)
except DuplicateKeyError:
if conflict_policy is ConflictPolicy.exception:
raise ConflictingIdError(schedule.id) from None
elif conflict_policy is ConflictPolicy.replace:
for attempt in self._retry():
with attempt:
self._schedules.replace_one(
{"_id": schedule.id}, document, True
)
event = ScheduleUpdated(
schedule_id=schedule.id, next_fire_time=schedule.next_fire_time
)
self._events.publish(event)
else:
event = ScheduleAdded(
schedule_id=schedule.id, next_fire_time=schedule.next_fire_time
)
self._events.publish(event)
def remove_schedules(self, ids: Iterable[str]) -> None:
filters = {"_id": {"$in": list(ids)}} if ids is not None else {}
for attempt in self._retry():
with attempt, self.client.start_session() as session:
cursor = self._schedules.find(
filters, projection=["_id"], session=session
)
ids = [doc["_id"] for doc in cursor]
if ids:
self._schedules.delete_many(filters, session=session)
for schedule_id in ids:
self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]:
for attempt in self._retry():
with attempt, self.client.start_session() as session:
schedules: list[Schedule] = []
cursor = (
self._schedules.find(
{
"next_fire_time": {"$ne": None},
"$or": [
{"acquired_until": {"$exists": False}},
{"acquired_until": {"$lt": datetime.now(timezone.utc)}},
],
},
session=session,
)
.sort("next_fire_time")
.limit(limit)
)
for document in cursor:
document["id"] = document.pop("_id")
schedule = Schedule.unmarshal(self.serializer, document)
schedules.append(schedule)
if schedules:
now = datetime.now(timezone.utc)
acquired_until = datetime.fromtimestamp(
now.timestamp() + self.lock_expiration_delay, now.tzinfo
)
filters = {"_id": {"$in": [schedule.id for schedule in schedules]}}
update = {
"$set": {
"acquired_by": scheduler_id,
"acquired_until": acquired_until,
}
}
self._schedules.update_many(filters, update, session=session)
return schedules
def release_schedules(self, scheduler_id: str, schedules: list[Schedule]) -> None:
updated_schedules: list[tuple[str, datetime]] = []
finished_schedule_ids: list[str] = []
# Update schedules that have a next fire time
requests = []
for schedule in schedules:
filters = {"_id": schedule.id, "acquired_by": scheduler_id}
if schedule.next_fire_time is not None:
try:
serialized_trigger = self.serializer.serialize(schedule.trigger)
except SerializationError:
self._logger.exception(
"Error serializing schedule %r – " "removing from data store",
schedule.id,
)
requests.append(DeleteOne(filters))
finished_schedule_ids.append(schedule.id)
continue
update = {
"$unset": {
"acquired_by": True,
"acquired_until": True,
},
"$set": {
"trigger": serialized_trigger,
"next_fire_time": schedule.next_fire_time,
},
}
requests.append(UpdateOne(filters, update))
updated_schedules.append((schedule.id, schedule.next_fire_time))
else:
requests.append(DeleteOne(filters))
finished_schedule_ids.append(schedule.id)
if requests:
for attempt in self._retry():
with attempt, self.client.start_session() as session:
self._schedules.bulk_write(
requests, ordered=False, session=session
)
for schedule_id, next_fire_time in updated_schedules:
event = ScheduleUpdated(
schedule_id=schedule_id, next_fire_time=next_fire_time
)
self._events.publish(event)
for schedule_id in finished_schedule_ids:
self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
def get_next_schedule_run_time(self) -> datetime | None:
for attempt in self._retry():
with attempt:
                document = self._schedules.find_one(
                    {"next_fire_time": {"$ne": None}},
                    projection=["next_fire_time"],
                    sort=[("next_fire_time", ASCENDING)],
                )
                if document:
                    return document["next_fire_time"]
else:
return None
def add_job(self, job: Job) -> None:
document = job.marshal(self.serializer)
document["_id"] = document.pop("id")
for attempt in self._retry():
with attempt:
self._jobs.insert_one(document)
event = JobAdded(
job_id=job.id,
task_id=job.task_id,
schedule_id=job.schedule_id,
tags=job.tags,
)
self._events.publish(event)
def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
filters = {"_id": {"$in": list(ids)}} if ids is not None else {}
for attempt in self._retry():
with attempt:
jobs: list[Job] = []
cursor = self._jobs.find(filters).sort("_id")
for document in cursor:
document["id"] = document.pop("_id")
try:
job = Job.unmarshal(self.serializer, document)
except DeserializationError:
self._logger.warning(
"Failed to deserialize job %r", document["id"]
)
continue
jobs.append(job)
return jobs
def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
for attempt in self._retry():
with attempt, self.client.start_session() as session:
cursor = self._jobs.find(
{
"$or": [
{"acquired_until": {"$exists": False}},
{"acquired_until": {"$lt": datetime.now(timezone.utc)}},
]
},
sort=[("created_at", ASCENDING)],
                    limit=limit or 0,  # pymongo requires an int; 0 means no limit
session=session,
)
documents = list(cursor)
# Retrieve the limits
task_ids: set[str] = {document["task_id"] for document in documents}
task_limits = self._tasks.find(
{"_id": {"$in": list(task_ids)}, "max_running_jobs": {"$ne": None}},
projection=["max_running_jobs", "running_jobs"],
session=session,
)
job_slots_left = {
doc["_id"]: doc["max_running_jobs"] - doc["running_jobs"]
for doc in task_limits
}
# Filter out jobs that don't have free slots
acquired_jobs: list[Job] = []
increments: dict[str, int] = defaultdict(lambda: 0)
for document in documents:
document["id"] = document.pop("_id")
job = Job.unmarshal(self.serializer, document)
# Don't acquire the job if there are no free slots left
slots_left = job_slots_left.get(job.task_id)
if slots_left == 0:
continue
elif slots_left is not None:
job_slots_left[job.task_id] -= 1
acquired_jobs.append(job)
increments[job.task_id] += 1
if acquired_jobs:
now = datetime.now(timezone.utc)
acquired_until = datetime.fromtimestamp(
now.timestamp() + self.lock_expiration_delay, timezone.utc
)
filters = {"_id": {"$in": [job.id for job in acquired_jobs]}}
update = {
"$set": {
"acquired_by": worker_id,
"acquired_until": acquired_until,
}
}
self._jobs.update_many(filters, update, session=session)
# Increment the running job counters on each task
for task_id, increment in increments.items():
self._tasks.find_one_and_update(
{"_id": task_id},
{"$inc": {"running_jobs": increment}},
session=session,
)
# Publish the appropriate events
for job in acquired_jobs:
self._events.publish(JobAcquired(job_id=job.id, worker_id=worker_id))
return acquired_jobs
def release_job(self, worker_id: str, task_id: str, result: JobResult) -> None:
for attempt in self._retry():
with attempt, self.client.start_session() as session:
# Record the job result
if result.expires_at > result.finished_at:
document = result.marshal(self.serializer)
document["_id"] = document.pop("job_id")
self._jobs_results.insert_one(document, session=session)
# Decrement the running jobs counter
self._tasks.find_one_and_update(
{"_id": task_id}, {"$inc": {"running_jobs": -1}}, session=session
)
# Delete the job
self._jobs.delete_one({"_id": result.job_id}, session=session)
def get_job_result(self, job_id: UUID) -> JobResult | None:
for attempt in self._retry():
with attempt:
document = self._jobs_results.find_one_and_delete({"_id": job_id})
if document:
document["job_id"] = document.pop("_id")
return JobResult.unmarshal(self.serializer, document)
else:
return None | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/datastores/mongodb.py | mongodb.py |
from __future__ import annotations
import sys
from datetime import datetime
from typing import Iterable
from uuid import UUID
import attrs
from anyio import to_thread
from anyio.from_thread import BlockingPortal
from .._enums import ConflictPolicy
from .._structures import Job, JobResult, Schedule, Task
from ..abc import AsyncEventBroker, DataStore
from ..eventbrokers.async_adapter import AsyncEventBrokerAdapter, SyncEventBrokerAdapter
from .base import BaseAsyncDataStore
@attrs.define(eq=False)
class AsyncDataStoreAdapter(BaseAsyncDataStore):
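    """
    Wraps a synchronous data store so that it can be used through the asynchronous
    data store interface, by running each operation in a worker thread.
    :param original: the synchronous data store to wrap
    """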
original: DataStore
_portal: BlockingPortal = attrs.field(init=False)
async def start(self, event_broker: AsyncEventBroker) -> None:
await super().start(event_broker)
self._portal = BlockingPortal()
await self._portal.__aenter__()
if isinstance(event_broker, AsyncEventBrokerAdapter):
sync_event_broker = event_broker.original
else:
sync_event_broker = SyncEventBrokerAdapter(event_broker, self._portal)
try:
await to_thread.run_sync(lambda: self.original.start(sync_event_broker))
except BaseException:
await self._portal.__aexit__(*sys.exc_info())
raise
async def stop(self, *, force: bool = False) -> None:
try:
await to_thread.run_sync(lambda: self.original.stop(force=force))
finally:
await self._portal.__aexit__(None, None, None)
await super().stop(force=force)
async def add_task(self, task: Task) -> None:
await to_thread.run_sync(self.original.add_task, task)
async def remove_task(self, task_id: str) -> None:
await to_thread.run_sync(self.original.remove_task, task_id)
async def get_task(self, task_id: str) -> Task:
return await to_thread.run_sync(self.original.get_task, task_id)
async def get_tasks(self) -> list[Task]:
return await to_thread.run_sync(self.original.get_tasks)
async def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
return await to_thread.run_sync(self.original.get_schedules, ids)
async def add_schedule(
self, schedule: Schedule, conflict_policy: ConflictPolicy
) -> None:
await to_thread.run_sync(self.original.add_schedule, schedule, conflict_policy)
async def remove_schedules(self, ids: Iterable[str]) -> None:
await to_thread.run_sync(self.original.remove_schedules, ids)
async def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]:
return await to_thread.run_sync(
self.original.acquire_schedules, scheduler_id, limit
)
async def release_schedules(
self, scheduler_id: str, schedules: list[Schedule]
) -> None:
await to_thread.run_sync(
self.original.release_schedules, scheduler_id, schedules
)
async def get_next_schedule_run_time(self) -> datetime | None:
return await to_thread.run_sync(self.original.get_next_schedule_run_time)
async def add_job(self, job: Job) -> None:
await to_thread.run_sync(self.original.add_job, job)
async def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
return await to_thread.run_sync(self.original.get_jobs, ids)
async def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
return await to_thread.run_sync(self.original.acquire_jobs, worker_id, limit)
async def release_job(
self, worker_id: str, task_id: str, result: JobResult
) -> None:
await to_thread.run_sync(self.original.release_job, worker_id, task_id, result)
async def get_job_result(self, job_id: UUID) -> JobResult | None:
return await to_thread.run_sync(self.original.get_job_result, job_id) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/datastores/async_adapter.py | async_adapter.py |
from __future__ import annotations
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from logging import Logger, getLogger
from typing import Any, Iterable
from uuid import UUID
import attrs
import tenacity
from sqlalchemy import (
JSON,
TIMESTAMP,
BigInteger,
Column,
Enum,
Integer,
LargeBinary,
MetaData,
Table,
TypeDecorator,
Unicode,
and_,
bindparam,
or_,
select,
)
from sqlalchemy.engine import URL, Dialect, Result
from sqlalchemy.exc import CompileError, IntegrityError, OperationalError
from sqlalchemy.future import Engine, create_engine
from sqlalchemy.sql.ddl import DropTable
from sqlalchemy.sql.elements import BindParameter, literal
from .._enums import CoalescePolicy, ConflictPolicy, JobOutcome
from .._events import (
Event,
JobAcquired,
JobAdded,
JobDeserializationFailed,
ScheduleAdded,
ScheduleDeserializationFailed,
ScheduleRemoved,
ScheduleUpdated,
TaskAdded,
TaskRemoved,
TaskUpdated,
)
from .._exceptions import ConflictingIdError, SerializationError, TaskLookupError
from .._structures import Job, JobResult, RetrySettings, Schedule, Task
from ..abc import EventBroker, Serializer
from ..marshalling import callable_to_ref
from ..serializers.pickle import PickleSerializer
from .base import BaseDataStore
class EmulatedUUID(TypeDecorator):
impl = Unicode(32)
cache_ok = True
def process_bind_param(self, value, dialect: Dialect) -> Any:
return value.hex if value is not None else None
def process_result_value(self, value: Any, dialect: Dialect):
return UUID(value) if value else None
class EmulatedTimestampTZ(TypeDecorator):
impl = Unicode(32)
cache_ok = True
def process_bind_param(self, value, dialect: Dialect) -> Any:
return value.isoformat() if value is not None else None
def process_result_value(self, value: Any, dialect: Dialect):
return datetime.fromisoformat(value) if value is not None else None
class EmulatedInterval(TypeDecorator):
impl = BigInteger()
cache_ok = True
def process_bind_param(self, value, dialect: Dialect) -> Any:
return value.total_seconds() if value is not None else None
def process_result_value(self, value: Any, dialect: Dialect):
return timedelta(seconds=value) if value is not None else None
@attrs.define(kw_only=True, eq=False)
class _BaseSQLAlchemyDataStore:
schema: str | None = attrs.field(default=None)
serializer: Serializer = attrs.field(factory=PickleSerializer)
lock_expiration_delay: float = attrs.field(default=30)
max_poll_time: float | None = attrs.field(default=1)
max_idle_time: float = attrs.field(default=60)
retry_settings: RetrySettings = attrs.field(default=RetrySettings())
start_from_scratch: bool = attrs.field(default=False)
_logger: Logger = attrs.field(init=False, factory=lambda: getLogger(__name__))
def __attrs_post_init__(self) -> None:
# Generate the table definitions
self._metadata = self.get_table_definitions()
self.t_metadata = self._metadata.tables["metadata"]
self.t_tasks = self._metadata.tables["tasks"]
self.t_schedules = self._metadata.tables["schedules"]
self.t_jobs = self._metadata.tables["jobs"]
self.t_job_results = self._metadata.tables["job_results"]
# Find out if the dialect supports UPDATE...RETURNING
update = self.t_jobs.update().returning(self.t_jobs.c.id)
try:
update.compile(bind=self.engine)
except CompileError:
self._supports_update_returning = False
else:
self._supports_update_returning = True
def _after_attempt(self, retry_state: tenacity.RetryCallState) -> None:
self._logger.warning(
"Temporary data store error (attempt %d): %s",
retry_state.attempt_number,
retry_state.outcome.exception(),
)
def get_table_definitions(self) -> MetaData:
if self.engine.dialect.name == "postgresql":
from sqlalchemy.dialects import postgresql
timestamp_type = TIMESTAMP(timezone=True)
job_id_type = postgresql.UUID(as_uuid=True)
interval_type = postgresql.INTERVAL(precision=6)
tags_type = postgresql.ARRAY(Unicode)
else:
timestamp_type = EmulatedTimestampTZ
job_id_type = EmulatedUUID
interval_type = EmulatedInterval
tags_type = JSON
metadata = MetaData()
Table("metadata", metadata, Column("schema_version", Integer, nullable=False))
Table(
"tasks",
metadata,
Column("id", Unicode(500), primary_key=True),
Column("func", Unicode(500), nullable=False),
Column("state", LargeBinary),
Column("max_running_jobs", Integer),
Column("misfire_grace_time", interval_type),
Column("running_jobs", Integer, nullable=False, server_default=literal(0)),
)
Table(
"schedules",
metadata,
Column("id", Unicode(500), primary_key=True),
Column("task_id", Unicode(500), nullable=False, index=True),
Column("trigger", LargeBinary),
Column("args", LargeBinary),
Column("kwargs", LargeBinary),
Column("coalesce", Enum(CoalescePolicy), nullable=False),
Column("misfire_grace_time", interval_type),
Column("max_jitter", interval_type),
Column("tags", tags_type, nullable=False),
Column("next_fire_time", timestamp_type, index=True),
Column("last_fire_time", timestamp_type),
Column("acquired_by", Unicode(500)),
Column("acquired_until", timestamp_type),
)
Table(
"jobs",
metadata,
Column("id", job_id_type, primary_key=True),
Column("task_id", Unicode(500), nullable=False, index=True),
Column("args", LargeBinary, nullable=False),
Column("kwargs", LargeBinary, nullable=False),
Column("schedule_id", Unicode(500)),
Column("scheduled_fire_time", timestamp_type),
Column("jitter", interval_type),
Column("start_deadline", timestamp_type),
Column("result_expiration_time", interval_type),
Column("tags", tags_type, nullable=False),
Column("created_at", timestamp_type, nullable=False),
Column("started_at", timestamp_type),
Column("acquired_by", Unicode(500)),
Column("acquired_until", timestamp_type),
)
Table(
"job_results",
metadata,
Column("job_id", job_id_type, primary_key=True),
Column("outcome", Enum(JobOutcome), nullable=False),
Column("finished_at", timestamp_type, index=True),
Column("expires_at", timestamp_type, nullable=False, index=True),
Column("exception", LargeBinary),
Column("return_value", LargeBinary),
)
return metadata
def _deserialize_schedules(self, result: Result) -> list[Schedule]:
schedules: list[Schedule] = []
for row in result:
try:
schedules.append(Schedule.unmarshal(self.serializer, row._asdict()))
except SerializationError as exc:
self._events.publish(
ScheduleDeserializationFailed(schedule_id=row["id"], exception=exc)
)
return schedules
def _deserialize_jobs(self, result: Result) -> list[Job]:
jobs: list[Job] = []
for row in result:
try:
jobs.append(Job.unmarshal(self.serializer, row._asdict()))
except SerializationError as exc:
self._events.publish(
JobDeserializationFailed(job_id=row["id"], exception=exc)
)
return jobs
@attrs.define(eq=False)
class SQLAlchemyDataStore(_BaseSQLAlchemyDataStore, BaseDataStore):
"""
Uses a relational database to store data.
When started, this data store creates the appropriate tables on the given database
if they're not already present.
Operations are retried (in accordance with ``retry_settings``) when an operation
raises :exc:`sqlalchemy.exc.OperationalError`.
This store has been tested to work with PostgreSQL (psycopg2 driver), MySQL
(pymysql driver) and SQLite.
:param engine: a (synchronous) SQLAlchemy engine
:param schema: a database schema name to use, if not the default
:param serializer: the serializer used to (de)serialize tasks, schedules and jobs
for storage
:param lock_expiration_delay: maximum amount of time (in seconds) that a scheduler
or worker can keep a lock on a schedule or task
:param retry_settings: Tenacity settings for retrying operations in case of a
database connectivity problem
:param start_from_scratch: erase all existing data during startup (useful for test
suites)
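Usage sketch (the database URL and the use of the synchronous scheduler below are
illustrative assumptions, not requirements of this class)::

    from apscheduler.datastores.sqlalchemy import SQLAlchemyDataStore
    from apscheduler.schedulers.sync import Scheduler

    data_store = SQLAlchemyDataStore.from_url("postgresql+psycopg2://localhost/app")
    with Scheduler(data_store) as scheduler:
        ...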
"""
engine: Engine
@classmethod
def from_url(cls, url: str | URL, **kwargs) -> SQLAlchemyDataStore:
"""
Create a new SQLAlchemy data store.
:param url: an SQLAlchemy URL to pass to :func:`~sqlalchemy.create_engine`
:param kwargs: keyword arguments to pass to the initializer of this class
:return: the newly created data store
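Example (the SQLite URL is only an illustrative assumption)::

    data_store = SQLAlchemyDataStore.from_url("sqlite:///scheduler.db")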
"""
engine = create_engine(url)
return cls(engine, **kwargs)
def _retry(self) -> tenacity.Retrying:
return tenacity.Retrying(
stop=self.retry_settings.stop,
wait=self.retry_settings.wait,
retry=tenacity.retry_if_exception_type(OperationalError),
after=self._after_attempt,
reraise=True,
)
def start(self, event_broker: EventBroker) -> None:
super().start(event_broker)
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
if self.start_from_scratch:
for table in self._metadata.sorted_tables:
conn.execute(DropTable(table, if_exists=True))
self._metadata.create_all(conn)
query = select(self.t_metadata.c.schema_version)
result = conn.execute(query)
version = result.scalar()
if version is None:
conn.execute(self.t_metadata.insert(values={"schema_version": 1}))
elif version > 1:
raise RuntimeError(
f"Unexpected schema version ({version}); "
f"only version 1 is supported by this version of APScheduler"
)
def add_task(self, task: Task) -> None:
insert = self.t_tasks.insert().values(
id=task.id,
func=callable_to_ref(task.func),
max_running_jobs=task.max_running_jobs,
misfire_grace_time=task.misfire_grace_time,
)
try:
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
conn.execute(insert)
except IntegrityError:
update = (
self.t_tasks.update()
.values(
func=callable_to_ref(task.func),
max_running_jobs=task.max_running_jobs,
misfire_grace_time=task.misfire_grace_time,
)
.where(self.t_tasks.c.id == task.id)
)
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
conn.execute(update)
self._events.publish(TaskUpdated(task_id=task.id))
else:
self._events.publish(TaskAdded(task_id=task.id))
def remove_task(self, task_id: str) -> None:
delete = self.t_tasks.delete().where(self.t_tasks.c.id == task_id)
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
result = conn.execute(delete)
if result.rowcount == 0:
raise TaskLookupError(task_id)
else:
self._events.publish(TaskRemoved(task_id=task_id))
def get_task(self, task_id: str) -> Task:
query = select(
[
self.t_tasks.c.id,
self.t_tasks.c.func,
self.t_tasks.c.max_running_jobs,
self.t_tasks.c.state,
self.t_tasks.c.misfire_grace_time,
]
).where(self.t_tasks.c.id == task_id)
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
result = conn.execute(query)
row = result.first()
if row:
return Task.unmarshal(self.serializer, row._asdict())
else:
raise TaskLookupError(task_id)
def get_tasks(self) -> list[Task]:
query = select(
[
self.t_tasks.c.id,
self.t_tasks.c.func,
self.t_tasks.c.max_running_jobs,
self.t_tasks.c.state,
self.t_tasks.c.misfire_grace_time,
]
).order_by(self.t_tasks.c.id)
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
result = conn.execute(query)
tasks = [
Task.unmarshal(self.serializer, row._asdict()) for row in result
]
return tasks
def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None:
event: Event
values = schedule.marshal(self.serializer)
insert = self.t_schedules.insert().values(**values)
try:
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
conn.execute(insert)
except IntegrityError:
if conflict_policy is ConflictPolicy.exception:
raise ConflictingIdError(schedule.id) from None
elif conflict_policy is ConflictPolicy.replace:
del values["id"]
update = (
self.t_schedules.update()
.where(self.t_schedules.c.id == schedule.id)
.values(**values)
)
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
conn.execute(update)
event = ScheduleUpdated(
schedule_id=schedule.id, next_fire_time=schedule.next_fire_time
)
self._events.publish(event)
else:
event = ScheduleAdded(
schedule_id=schedule.id, next_fire_time=schedule.next_fire_time
)
self._events.publish(event)
def remove_schedules(self, ids: Iterable[str]) -> None:
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
delete = self.t_schedules.delete().where(self.t_schedules.c.id.in_(ids))
if self._supports_update_returning:
delete = delete.returning(self.t_schedules.c.id)
removed_ids: Iterable[str] = [
row[0] for row in conn.execute(delete)
]
else:
# TODO: actually check which rows were deleted?
conn.execute(delete)
removed_ids = ids
for schedule_id in removed_ids:
self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
query = self.t_schedules.select().order_by(self.t_schedules.c.id)
if ids:
query = query.where(self.t_schedules.c.id.in_(ids))
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
result = conn.execute(query)
return self._deserialize_schedules(result)
def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]:
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
now = datetime.now(timezone.utc)
acquired_until = now + timedelta(seconds=self.lock_expiration_delay)
schedules_cte = (
select(self.t_schedules.c.id)
.where(
and_(
self.t_schedules.c.next_fire_time.isnot(None),
self.t_schedules.c.next_fire_time <= now,
or_(
self.t_schedules.c.acquired_until.is_(None),
self.t_schedules.c.acquired_until < now,
),
)
)
.order_by(self.t_schedules.c.next_fire_time)
.limit(limit)
.with_for_update(skip_locked=True)
.cte()
)
subselect = select([schedules_cte.c.id])
update = (
self.t_schedules.update()
.where(self.t_schedules.c.id.in_(subselect))
.values(acquired_by=scheduler_id, acquired_until=acquired_until)
)
if self._supports_update_returning:
update = update.returning(*self.t_schedules.columns)
result = conn.execute(update)
else:
conn.execute(update)
query = self.t_schedules.select().where(
and_(self.t_schedules.c.acquired_by == scheduler_id)
)
result = conn.execute(query)
schedules = self._deserialize_schedules(result)
return schedules
def release_schedules(self, scheduler_id: str, schedules: list[Schedule]) -> None:
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
update_events: list[ScheduleUpdated] = []
finished_schedule_ids: list[str] = []
update_args: list[dict[str, Any]] = []
for schedule in schedules:
if schedule.next_fire_time is not None:
try:
serialized_trigger = self.serializer.serialize(
schedule.trigger
)
except SerializationError:
self._logger.exception(
"Error serializing trigger for schedule %r – "
"removing from data store",
schedule.id,
)
finished_schedule_ids.append(schedule.id)
continue
update_args.append(
{
"p_id": schedule.id,
"p_trigger": serialized_trigger,
"p_next_fire_time": schedule.next_fire_time,
}
)
else:
finished_schedule_ids.append(schedule.id)
# Update schedules that have a next fire time
if update_args:
p_id: BindParameter = bindparam("p_id")
p_trigger: BindParameter = bindparam("p_trigger")
p_next_fire_time: BindParameter = bindparam("p_next_fire_time")
update = (
self.t_schedules.update()
.where(
and_(
self.t_schedules.c.id == p_id,
self.t_schedules.c.acquired_by == scheduler_id,
)
)
.values(
trigger=p_trigger,
next_fire_time=p_next_fire_time,
acquired_by=None,
acquired_until=None,
)
)
next_fire_times = {
arg["p_id"]: arg["p_next_fire_time"] for arg in update_args
}
# TODO: actually check which rows were updated?
conn.execute(update, update_args)
updated_ids = list(next_fire_times)
for schedule_id in updated_ids:
event = ScheduleUpdated(
schedule_id=schedule_id,
next_fire_time=next_fire_times[schedule_id],
)
update_events.append(event)
# Remove schedules that have no next fire time or failed to serialize
if finished_schedule_ids:
delete = self.t_schedules.delete().where(
self.t_schedules.c.id.in_(finished_schedule_ids)
)
conn.execute(delete)
for event in update_events:
self._events.publish(event)
for schedule_id in finished_schedule_ids:
self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
def get_next_schedule_run_time(self) -> datetime | None:
query = (
select(self.t_schedules.c.next_fire_time)
.where(self.t_schedules.c.next_fire_time.isnot(None))
.order_by(self.t_schedules.c.next_fire_time)
.limit(1)
)
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
result = conn.execute(query)
return result.scalar()
def add_job(self, job: Job) -> None:
marshalled = job.marshal(self.serializer)
insert = self.t_jobs.insert().values(**marshalled)
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
conn.execute(insert)
event = JobAdded(
job_id=job.id,
task_id=job.task_id,
schedule_id=job.schedule_id,
tags=job.tags,
)
self._events.publish(event)
def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
query = self.t_jobs.select().order_by(self.t_jobs.c.id)
if ids:
job_ids = [job_id for job_id in ids]
query = query.where(self.t_jobs.c.id.in_(job_ids))
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
result = conn.execute(query)
return self._deserialize_jobs(result)
def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
now = datetime.now(timezone.utc)
acquired_until = now + timedelta(seconds=self.lock_expiration_delay)
query = (
self.t_jobs.select()
.join(self.t_tasks, self.t_tasks.c.id == self.t_jobs.c.task_id)
.where(
or_(
self.t_jobs.c.acquired_until.is_(None),
self.t_jobs.c.acquired_until < now,
)
)
.order_by(self.t_jobs.c.created_at)
.with_for_update(skip_locked=True)
.limit(limit)
)
result = conn.execute(query)
if not result:
return []
# Mark the jobs as acquired by this worker
jobs = self._deserialize_jobs(result)
task_ids: set[str] = {job.task_id for job in jobs}
# Retrieve the limits
query = select(
[
self.t_tasks.c.id,
self.t_tasks.c.max_running_jobs - self.t_tasks.c.running_jobs,
]
).where(
self.t_tasks.c.max_running_jobs.isnot(None),
self.t_tasks.c.id.in_(task_ids),
)
result = conn.execute(query)
job_slots_left = dict(result.fetchall())
# Filter out jobs that don't have free slots
acquired_jobs: list[Job] = []
increments: dict[str, int] = defaultdict(lambda: 0)
for job in jobs:
# Don't acquire the job if there are no free slots left
slots_left = job_slots_left.get(job.task_id)
if slots_left == 0:
continue
elif slots_left is not None:
job_slots_left[job.task_id] -= 1
acquired_jobs.append(job)
increments[job.task_id] += 1
if acquired_jobs:
# Mark the acquired jobs as acquired by this worker
acquired_job_ids = [job.id for job in acquired_jobs]
update = (
self.t_jobs.update()
.values(acquired_by=worker_id, acquired_until=acquired_until)
.where(self.t_jobs.c.id.in_(acquired_job_ids))
)
conn.execute(update)
# Increment the running job counters on each task
p_id: BindParameter = bindparam("p_id")
p_increment: BindParameter = bindparam("p_increment")
params = [
{"p_id": task_id, "p_increment": increment}
for task_id, increment in increments.items()
]
update = (
self.t_tasks.update()
.values(running_jobs=self.t_tasks.c.running_jobs + p_increment)
.where(self.t_tasks.c.id == p_id)
)
conn.execute(update, params)
# Publish the appropriate events
for job in acquired_jobs:
self._events.publish(JobAcquired(job_id=job.id, worker_id=worker_id))
return acquired_jobs
def release_job(self, worker_id: str, task_id: str, result: JobResult) -> None:
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
# Insert the job result
if result.expires_at > result.finished_at:
marshalled = result.marshal(self.serializer)
insert = self.t_job_results.insert().values(**marshalled)
conn.execute(insert)
# Decrement the running jobs counter
update = (
self.t_tasks.update()
.values(running_jobs=self.t_tasks.c.running_jobs - 1)
.where(self.t_tasks.c.id == task_id)
)
conn.execute(update)
# Delete the job
delete = self.t_jobs.delete().where(self.t_jobs.c.id == result.job_id)
conn.execute(delete)
def get_job_result(self, job_id: UUID) -> JobResult | None:
for attempt in self._retry():
with attempt, self.engine.begin() as conn:
# Retrieve the result
query = self.t_job_results.select().where(
self.t_job_results.c.job_id == job_id
)
row = conn.execute(query).first()
# Delete the result
delete = self.t_job_results.delete().where(
self.t_job_results.c.job_id == job_id
)
conn.execute(delete)
return (
JobResult.unmarshal(self.serializer, row._asdict()) if row else None
) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/datastores/sqlalchemy.py | sqlalchemy.py |
from __future__ import annotations
import atexit
import os
import platform
import random
import threading
from concurrent.futures import Future
from contextlib import ExitStack
from datetime import datetime, timedelta, timezone
from logging import Logger, getLogger
from types import TracebackType
from typing import Any, Callable, Iterable, Mapping, cast
from uuid import UUID, uuid4
import attrs
from .._context import current_scheduler
from .._enums import CoalescePolicy, ConflictPolicy, JobOutcome, RunState
from .._events import (
Event,
JobReleased,
ScheduleAdded,
SchedulerStarted,
SchedulerStopped,
ScheduleUpdated,
)
from .._exceptions import (
JobCancelled,
JobDeadlineMissed,
JobLookupError,
ScheduleLookupError,
)
from .._structures import Job, JobResult, Schedule, Task
from ..abc import DataStore, EventBroker, Trigger
from ..datastores.memory import MemoryDataStore
from ..eventbrokers.local import LocalEventBroker
from ..marshalling import callable_to_ref
from ..workers.sync import Worker
_microsecond_delta = timedelta(microseconds=1)
_zero_timedelta = timedelta()
@attrs.define(eq=False)
class Scheduler:
"""A synchronous scheduler implementation."""
data_store: DataStore = attrs.field(factory=MemoryDataStore)
event_broker: EventBroker = attrs.field(factory=LocalEventBroker)
identity: str = attrs.field(kw_only=True, default=None)
start_worker: bool = attrs.field(kw_only=True, default=True)
logger: Logger | None = attrs.field(kw_only=True, default=getLogger(__name__))
_state: RunState = attrs.field(init=False, default=RunState.stopped)
_thread: threading.Thread | None = attrs.field(init=False, default=None)
_wakeup_event: threading.Event = attrs.field(init=False, factory=threading.Event)
_wakeup_deadline: datetime | None = attrs.field(init=False, default=None)
_services_initialized: bool = attrs.field(init=False, default=False)
_exit_stack: ExitStack = attrs.field(init=False, factory=ExitStack)
_lock: threading.RLock = attrs.field(init=False, factory=threading.RLock)
def __attrs_post_init__(self) -> None:
if not self.identity:
self.identity = f"{platform.node()}-{os.getpid()}-{id(self)}"
def __enter__(self) -> Scheduler:
self.start_in_background()
return self
def __exit__(
self,
exc_type: type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> None:
self.stop()
self._join_thread()
def _ensure_services_ready(self, exit_stack: ExitStack | None = None) -> None:
"""Ensure that the data store and event broker have been initialized."""
stack = exit_stack or self._exit_stack
with self._lock:
if not self._services_initialized:
self._services_initialized = True
self.event_broker.start()
stack.push(
lambda *exc_info: self.event_broker.stop(
force=exc_info[0] is not None
)
)
# Initialize the data store
self.data_store.start(self.event_broker)
stack.push(
lambda *exc_info: self.data_store.stop(
force=exc_info[0] is not None
)
)
if not exit_stack:
atexit.register(self._exit_stack.close)
def _schedule_added_or_modified(self, event: Event) -> None:
event_ = cast("ScheduleAdded | ScheduleUpdated", event)
if not self._wakeup_deadline or (
event_.next_fire_time and event_.next_fire_time < self._wakeup_deadline
):
self.logger.debug(
"Detected a %s event – waking up the scheduler", type(event).__name__
)
self._wakeup_event.set()
def _join_thread(self) -> None:
if self._thread:
self._thread.join()
self._thread = None
@property
def state(self) -> RunState:
"""The current running state of the scheduler."""
return self._state
def add_schedule(
self,
func_or_task_id: str | Callable,
trigger: Trigger,
*,
id: str | None = None,
args: Iterable | None = None,
kwargs: Mapping[str, Any] | None = None,
coalesce: CoalescePolicy = CoalescePolicy.latest,
misfire_grace_time: float | timedelta | None = None,
max_jitter: float | timedelta | None = None,
tags: Iterable[str] | None = None,
conflict_policy: ConflictPolicy = ConflictPolicy.do_nothing,
) -> str:
"""
Schedule a task to be run one or more times in the future.
:param func_or_task_id: either a callable or an ID of an existing task
definition
:param trigger: determines the times when the task should be run
:param id: an explicit identifier for the schedule (if omitted, a random, UUID
based ID will be assigned)
:param args: positional arguments to be passed to the task function
:param kwargs: keyword arguments to be passed to the task function
:param coalesce: determines what to do when processing the schedule if multiple
fire times have become due for this schedule since the last processing
:param misfire_grace_time: maximum number of seconds the scheduled job's actual
run time is allowed to be late, compared to the scheduled run time
:param max_jitter: maximum number of seconds to randomly add to the scheduled
time for each job created from this schedule
:param tags: strings that can be used to categorize and filter the schedule and
its derivative jobs
:param conflict_policy: determines what to do if a schedule with the same ID
already exists in the data store
:return: the ID of the newly added schedule
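Minimal sketch, assuming a started scheduler instance ``scheduler``; the
``send_report`` function and the daily trigger are illustrative choices::

    from apscheduler.triggers.calendarinterval import CalendarIntervalTrigger

    def send_report():
        ...

    schedule_id = scheduler.add_schedule(
        send_report, CalendarIntervalTrigger(days=1, hour=8)
    )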
"""
self._ensure_services_ready()
id = id or str(uuid4())
args = tuple(args or ())
kwargs = dict(kwargs or {})
tags = frozenset(tags or ())
if isinstance(misfire_grace_time, (int, float)):
misfire_grace_time = timedelta(seconds=misfire_grace_time)
if callable(func_or_task_id):
task = Task(id=callable_to_ref(func_or_task_id), func=func_or_task_id)
self.data_store.add_task(task)
else:
task = self.data_store.get_task(func_or_task_id)
schedule = Schedule(
id=id,
task_id=task.id,
trigger=trigger,
args=args,
kwargs=kwargs,
coalesce=coalesce,
misfire_grace_time=misfire_grace_time,
max_jitter=max_jitter,
tags=tags,
)
schedule.next_fire_time = trigger.next()
self.data_store.add_schedule(schedule, conflict_policy)
self.logger.info(
"Added new schedule (task=%r, trigger=%r); next run time at %s",
task,
trigger,
schedule.next_fire_time,
)
return schedule.id
def get_schedule(self, id: str) -> Schedule:
"""
Retrieve a schedule from the data store.
:param id: the unique identifier of the schedule
:raises ScheduleLookupError: if the schedule could not be found
"""
self._ensure_services_ready()
schedules = self.data_store.get_schedules({id})
if schedules:
return schedules[0]
else:
raise ScheduleLookupError(id)
def get_schedules(self) -> list[Schedule]:
"""
Retrieve all schedules from the data store.
:return: a list of schedules, in an unspecified order
"""
self._ensure_services_ready()
return self.data_store.get_schedules()
def remove_schedule(self, id: str) -> None:
"""
Remove the given schedule from the data store.
:param id: the unique identifier of the schedule
"""
self._ensure_services_ready()
self.data_store.remove_schedules({id})
def add_job(
self,
func_or_task_id: str | Callable,
*,
args: Iterable | None = None,
kwargs: Mapping[str, Any] | None = None,
tags: Iterable[str] | None = None,
result_expiration_time: timedelta | float = 0,
) -> UUID:
"""
Add a job to the data store.
:param func_or_task_id: either a callable or an ID of an existing task
definition
:param args: positional arguments to be passed to the task function
:param kwargs: keyword arguments to be passed to the task function
:param tags: strings that can be used to categorize and filter the job
:param result_expiration_time: the minimum time (as seconds, or timedelta) to
keep the result of the job available for fetching (the result won't be
saved at all if that time is 0)
:return: the ID of the newly created job
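Minimal sketch, assuming a started scheduler instance ``scheduler``; ``my_task``
and its argument are illustrative assumptions::

    job_id = scheduler.add_job(my_task, args=(42,))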
"""
self._ensure_services_ready()
if callable(func_or_task_id):
task = Task(id=callable_to_ref(func_or_task_id), func=func_or_task_id)
self.data_store.add_task(task)
else:
task = self.data_store.get_task(func_or_task_id)
job = Job(
task_id=task.id,
args=args or (),
kwargs=kwargs or {},
tags=tags or frozenset(),
result_expiration_time=result_expiration_time,
)
self.data_store.add_job(job)
return job.id
def get_job_result(self, job_id: UUID, *, wait: bool = True) -> JobResult:
"""
Retrieve the result of a job.
:param job_id: the ID of the job
:param wait: if ``True``, wait until the job has ended (one way or another),
``False`` to raise an exception if the result is not yet available
:raises JobLookupError: if ``wait=False`` and the job result does not exist in
the data store
"""
self._ensure_services_ready()
wait_event = threading.Event()
def listener(event: JobReleased) -> None:
if event.job_id == job_id:
wait_event.set()
with self.data_store.events.subscribe(listener, {JobReleased}, one_shot=True):
result = self.data_store.get_job_result(job_id)
if result:
return result
elif not wait:
raise JobLookupError(job_id)
wait_event.wait()
return self.data_store.get_job_result(job_id)
def run_job(
self,
func_or_task_id: str | Callable,
*,
args: Iterable | None = None,
kwargs: Mapping[str, Any] | None = None,
tags: Iterable[str] | None = (),
) -> Any:
"""
Convenience method to add a job and then return its result.
If the job raised an exception, that exception will be reraised here.
:param func_or_task_id: either a callable or an ID of an existing task
definition
:param args: positional arguments to be passed to the task function
:param kwargs: keyword arguments to be passed to the task function
:param tags: strings that can be used to categorize and filter the job
:returns: the return value of the task function
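Minimal sketch, assuming the scheduler is running with its internal worker;
``my_task`` and its keyword argument are illustrative assumptions::

    return_value = scheduler.run_job(my_task, kwargs={"retries": 3})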
"""
self._ensure_services_ready()
job_complete_event = threading.Event()
def listener(event: JobReleased) -> None:
if event.job_id == job_id:
job_complete_event.set()
job_id: UUID | None = None
with self.data_store.events.subscribe(listener, {JobReleased}):
job_id = self.add_job(
func_or_task_id,
args=args,
kwargs=kwargs,
tags=tags,
result_expiration_time=timedelta(minutes=15),
)
job_complete_event.wait()
result = self.get_job_result(job_id)
if result.outcome is JobOutcome.success:
return result.return_value
elif result.outcome is JobOutcome.error:
raise result.exception
elif result.outcome is JobOutcome.missed_start_deadline:
raise JobDeadlineMissed
elif result.outcome is JobOutcome.cancelled:
raise JobCancelled
else:
raise RuntimeError(f"Unknown job outcome: {result.outcome}")
def start_in_background(self) -> None:
"""
Launch the scheduler in a new thread.
This method registers :mod:`atexit` hooks to shut down the scheduler and wait
for the thread to finish.
:raises RuntimeError: if the scheduler is not in the ``stopped`` state
"""
with self._lock:
if self._state is not RunState.stopped:
raise RuntimeError(
f'Cannot start the scheduler when it is in the "{self._state}" '
f"state"
)
self._state = RunState.starting
start_future: Future[None] = Future()
self._thread = threading.Thread(
target=self._run, args=[start_future], daemon=True
)
self._thread.start()
try:
start_future.result()
except BaseException:
self._thread = None
raise
atexit.register(self._join_thread)
atexit.register(self.stop)
def stop(self) -> None:
"""
Signal the scheduler that it should stop processing schedules.
This method does not wait for the scheduler to actually stop.
For that, see :meth:`wait_until_stopped`.
"""
with self._lock:
if self._state is RunState.started:
self._state = RunState.stopping
self._wakeup_event.set()
def wait_until_stopped(self) -> None:
"""
Wait until the scheduler is in the "stopped" or "stopping" state.
If the scheduler is already stopped or in the process of stopping, this method
returns immediately. Otherwise, it waits until the scheduler posts the
``SchedulerStopped`` event.
"""
with self._lock:
if self._state in (RunState.stopped, RunState.stopping):
return
event = threading.Event()
sub = self.event_broker.subscribe(
lambda ev: event.set(), {SchedulerStopped}, one_shot=True
)
with sub:
event.wait()
def run_until_stopped(self) -> None:
"""
Run the scheduler (and its internal worker) until it is explicitly stopped.
This method will only return if :meth:`stop` is called.
"""
with self._lock:
if self._state is not RunState.stopped:
raise RuntimeError(
f'Cannot start the scheduler when it is in the "{self._state}" '
f"state"
)
self._state = RunState.starting
self._run(None)
def _run(self, start_future: Future[None] | None) -> None:
assert self._state is RunState.starting
with self._exit_stack.pop_all() as exit_stack:
try:
self._ensure_services_ready(exit_stack)
# Wake up the scheduler if the data store emits a significant schedule
# event
exit_stack.enter_context(
self.data_store.events.subscribe(
self._schedule_added_or_modified,
{ScheduleAdded, ScheduleUpdated},
)
)
# Start the built-in worker, if configured to do so
if self.start_worker:
token = current_scheduler.set(self)
exit_stack.callback(current_scheduler.reset, token)
worker = Worker(
self.data_store, self.event_broker, is_internal=True
)
exit_stack.enter_context(worker)
# Signal that the scheduler has started
self._state = RunState.started
self.event_broker.publish_local(SchedulerStarted())
except BaseException as exc:
if start_future:
start_future.set_exception(exc)
return
else:
raise
else:
if start_future:
start_future.set_result(None)
exception: BaseException | None = None
try:
while self._state is RunState.started:
schedules = self.data_store.acquire_schedules(self.identity, 100)
self.logger.debug(
"Processing %d schedules retrieved from the data store",
len(schedules),
)
now = datetime.now(timezone.utc)
for schedule in schedules:
# Calculate a next fire time for the schedule, if possible
fire_times = [schedule.next_fire_time]
calculate_next = schedule.trigger.next
while True:
try:
fire_time = calculate_next()
except Exception:
self.logger.exception(
"Error computing next fire time for schedule %r of "
"task %r – removing schedule",
schedule.id,
schedule.task_id,
)
break
# Stop if the calculated fire time is in the future
if fire_time is None or fire_time > now:
schedule.next_fire_time = fire_time
break
# Only keep all the fire times if coalesce policy = "all"
if schedule.coalesce is CoalescePolicy.all:
fire_times.append(fire_time)
elif schedule.coalesce is CoalescePolicy.latest:
fire_times[0] = fire_time
# Add one or more jobs to the job queue
max_jitter = (
schedule.max_jitter.total_seconds()
if schedule.max_jitter
else 0
)
for i, fire_time in enumerate(fire_times):
# Calculate a jitter if max_jitter > 0
jitter = _zero_timedelta
if max_jitter:
if i + 1 < len(fire_times):
next_fire_time = fire_times[i + 1]
else:
next_fire_time = schedule.next_fire_time
if next_fire_time is not None:
# Jitter must never be so high that it would cause
# a fire time to equal or exceed the next fire time
jitter_s = min(
[
max_jitter,
(
next_fire_time
- fire_time
- _microsecond_delta
).total_seconds(),
]
)
jitter = timedelta(
seconds=random.uniform(0, jitter_s)
)
fire_time += jitter
schedule.last_fire_time = fire_time
job = Job(
task_id=schedule.task_id,
args=schedule.args,
kwargs=schedule.kwargs,
schedule_id=schedule.id,
scheduled_fire_time=fire_time,
jitter=jitter,
start_deadline=schedule.next_deadline,
tags=schedule.tags,
)
self.data_store.add_job(job)
# Update the schedules (and release the scheduler's claim on them)
self.data_store.release_schedules(self.identity, schedules)
# If we received fewer schedules than the maximum amount, sleep
# until the next schedule is due or the scheduler is explicitly
# woken up
wait_time = None
if len(schedules) < 100:
self._wakeup_deadline = (
self.data_store.get_next_schedule_run_time()
)
if self._wakeup_deadline:
wait_time = (
self._wakeup_deadline - datetime.now(timezone.utc)
).total_seconds()
self.logger.debug(
"Sleeping %.3f seconds until the next fire time (%s)",
wait_time,
self._wakeup_deadline,
)
else:
self.logger.debug("Waiting for any due schedules to appear")
if self._wakeup_event.wait(wait_time):
self._wakeup_event = threading.Event()
else:
self.logger.debug(
"Processing more schedules on the next iteration"
)
except BaseException as exc:
exception = exc
raise
finally:
self._state = RunState.stopped
if isinstance(exception, Exception):
self.logger.exception("Scheduler crashed")
elif exception:
self.logger.info(
f"Scheduler stopped due to {exception.__class__.__name__}"
)
else:
self.logger.info("Scheduler stopped")
self.event_broker.publish_local(SchedulerStopped(exception=exception)) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/schedulers/sync.py | sync.py |
from __future__ import annotations
import os
import platform
import random
from contextlib import AsyncExitStack
from datetime import datetime, timedelta, timezone
from logging import Logger, getLogger
from typing import Any, Callable, Iterable, Mapping, cast
from uuid import UUID, uuid4
import anyio
import attrs
from anyio import TASK_STATUS_IGNORED, create_task_group, move_on_after
from anyio.abc import TaskGroup, TaskStatus
from .._context import current_scheduler
from .._converters import as_async_datastore, as_async_eventbroker
from .._enums import CoalescePolicy, ConflictPolicy, JobOutcome, RunState
from .._events import (
Event,
JobReleased,
ScheduleAdded,
SchedulerStarted,
SchedulerStopped,
ScheduleUpdated,
)
from .._exceptions import (
JobCancelled,
JobDeadlineMissed,
JobLookupError,
ScheduleLookupError,
)
from .._structures import Job, JobResult, Schedule, Task
from ..abc import AsyncDataStore, AsyncEventBroker, Subscription, Trigger
from ..datastores.memory import MemoryDataStore
from ..eventbrokers.async_local import LocalAsyncEventBroker
from ..marshalling import callable_to_ref
from ..workers.async_ import AsyncWorker
_microsecond_delta = timedelta(microseconds=1)
_zero_timedelta = timedelta()
@attrs.define(eq=False)
class AsyncScheduler:
"""An asynchronous (AnyIO based) scheduler implementation."""
data_store: AsyncDataStore = attrs.field(
converter=as_async_datastore, factory=MemoryDataStore
)
event_broker: AsyncEventBroker = attrs.field(
converter=as_async_eventbroker, factory=LocalAsyncEventBroker
)
identity: str = attrs.field(kw_only=True, default=None)
start_worker: bool = attrs.field(kw_only=True, default=True)
logger: Logger | None = attrs.field(kw_only=True, default=getLogger(__name__))
_state: RunState = attrs.field(init=False, default=RunState.stopped)
_task_group: TaskGroup | None = attrs.field(init=False, default=None)
_wakeup_event: anyio.Event = attrs.field(init=False)
_wakeup_deadline: datetime | None = attrs.field(init=False, default=None)
_schedule_added_subscription: Subscription = attrs.field(init=False)
def __attrs_post_init__(self) -> None:
if not self.identity:
self.identity = f"{platform.node()}-{os.getpid()}-{id(self)}"
async def __aenter__(self):
self._task_group = create_task_group()
await self._task_group.__aenter__()
await self._task_group.start(self._run)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.stop()
await self._task_group.__aexit__(exc_type, exc_val, exc_tb)
self._task_group = None
def _schedule_added_or_modified(self, event: Event) -> None:
event_ = cast("ScheduleAdded | ScheduleUpdated", event)
if not self._wakeup_deadline or (
event_.next_fire_time and event_.next_fire_time < self._wakeup_deadline
):
self.logger.debug(
"Detected a %s event – waking up the scheduler", type(event).__name__
)
self._wakeup_event.set()
@property
def state(self) -> RunState:
"""The current running state of the scheduler."""
return self._state
async def add_schedule(
self,
func_or_task_id: str | Callable,
trigger: Trigger,
*,
id: str | None = None,
args: Iterable | None = None,
kwargs: Mapping[str, Any] | None = None,
coalesce: CoalescePolicy = CoalescePolicy.latest,
misfire_grace_time: float | timedelta | None = None,
max_jitter: float | timedelta | None = None,
tags: Iterable[str] | None = None,
conflict_policy: ConflictPolicy = ConflictPolicy.do_nothing,
) -> str:
"""
Schedule a task to be run one or more times in the future.
:param func_or_task_id: either a callable or an ID of an existing task
definition
:param trigger: determines the times when the task should be run
:param id: an explicit identifier for the schedule (if omitted, a random, UUID
based ID will be assigned)
:param args: positional arguments to be passed to the task function
:param kwargs: keyword arguments to be passed to the task function
:param coalesce: determines what to do when processing the schedule if multiple
fire times have become due for this schedule since the last processing
:param misfire_grace_time: maximum number of seconds the scheduled job's actual
run time is allowed to be late, compared to the scheduled run time
:param max_jitter: maximum number of seconds to randomly add to the scheduled
time for each job created from this schedule
:param tags: strings that can be used to categorize and filter the schedule and
its derivative jobs
:param conflict_policy: determines what to do if a schedule with the same ID
already exists in the data store
:return: the ID of the newly added schedule
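Minimal sketch (``send_report`` and the trigger values are illustrative
assumptions, not part of this API)::

    from apscheduler.triggers.calendarinterval import CalendarIntervalTrigger

    async with AsyncScheduler() as scheduler:
        await scheduler.add_schedule(
            send_report, CalendarIntervalTrigger(days=1, hour=8)
        )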
"""
id = id or str(uuid4())
args = tuple(args or ())
kwargs = dict(kwargs or {})
tags = frozenset(tags or ())
if isinstance(misfire_grace_time, (int, float)):
misfire_grace_time = timedelta(seconds=misfire_grace_time)
if callable(func_or_task_id):
task = Task(id=callable_to_ref(func_or_task_id), func=func_or_task_id)
await self.data_store.add_task(task)
else:
task = await self.data_store.get_task(func_or_task_id)
schedule = Schedule(
id=id,
task_id=task.id,
trigger=trigger,
args=args,
kwargs=kwargs,
coalesce=coalesce,
misfire_grace_time=misfire_grace_time,
max_jitter=max_jitter,
tags=tags,
)
schedule.next_fire_time = trigger.next()
await self.data_store.add_schedule(schedule, conflict_policy)
self.logger.info(
"Added new schedule (task=%r, trigger=%r); next run time at %s",
task,
trigger,
schedule.next_fire_time,
)
return schedule.id
async def get_schedule(self, id: str) -> Schedule:
"""
Retrieve a schedule from the data store.
:param id: the unique identifier of the schedule
:raises ScheduleLookupError: if the schedule could not be found
"""
schedules = await self.data_store.get_schedules({id})
if schedules:
return schedules[0]
else:
raise ScheduleLookupError(id)
async def get_schedules(self) -> list[Schedule]:
"""
Retrieve all schedules from the data store.
:return: a list of schedules, in an unspecified order
"""
return await self.data_store.get_schedules()
async def remove_schedule(self, id: str) -> None:
"""
Remove the given schedule from the data store.
:param id: the unique identifier of the schedule
"""
await self.data_store.remove_schedules({id})
async def add_job(
self,
func_or_task_id: str | Callable,
*,
args: Iterable | None = None,
kwargs: Mapping[str, Any] | None = None,
tags: Iterable[str] | None = None,
result_expiration_time: timedelta | float = 0,
) -> UUID:
"""
Add a job to the data store.
:param func_or_task_id: either a callable or an ID of an existing task definition
:param args: positional arguments to call the target callable with
:param kwargs: keyword arguments to call the target callable with
:param tags: strings that can be used to categorize and filter the job
:param result_expiration_time: the minimum time (as seconds, or timedelta) to
keep the result of the job available for fetching (the result won't be
saved at all if that time is 0)
:return: the ID of the newly created job
"""
if callable(func_or_task_id):
task = Task(id=callable_to_ref(func_or_task_id), func=func_or_task_id)
await self.data_store.add_task(task)
else:
task = await self.data_store.get_task(func_or_task_id)
job = Job(
task_id=task.id,
args=args or (),
kwargs=kwargs or {},
tags=tags or frozenset(),
result_expiration_time=result_expiration_time,
)
await self.data_store.add_job(job)
return job.id
async def get_job_result(self, job_id: UUID, *, wait: bool = True) -> JobResult:
"""
Retrieve the result of a job.
:param job_id: the ID of the job
:param wait: if ``True``, wait until the job has ended (one way or another),
``False`` to raise an exception if the result is not yet available
:raises JobLookupError: if ``wait=False`` and the job result does not exist in
the data store
"""
wait_event = anyio.Event()
def listener(event: JobReleased) -> None:
if event.job_id == job_id:
wait_event.set()
with self.data_store.events.subscribe(listener, {JobReleased}):
result = await self.data_store.get_job_result(job_id)
if result:
return result
elif not wait:
raise JobLookupError(job_id)
await wait_event.wait()
return await self.data_store.get_job_result(job_id)
async def run_job(
self,
func_or_task_id: str | Callable,
*,
args: Iterable | None = None,
kwargs: Mapping[str, Any] | None = None,
tags: Iterable[str] | None = (),
) -> Any:
"""
Convenience method to add a job and then return its result.
If the job raised an exception, that exception will be reraised here.
:param func_or_task_id: either a callable or an ID of an existing task
definition
:param args: positional arguments to be passed to the task function
:param kwargs: keyword arguments to be passed to the task function
:param tags: strings that can be used to categorize and filter the job
:returns: the return value of the task function
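Minimal sketch, inside an entered ``AsyncScheduler`` context; ``my_task`` is an
illustrative assumption::

    return_value = await scheduler.run_job(my_task)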
"""
job_complete_event = anyio.Event()
def listener(event: JobReleased) -> None:
if event.job_id == job_id:
job_complete_event.set()
job_id: UUID | None = None
with self.data_store.events.subscribe(listener, {JobReleased}):
job_id = await self.add_job(
func_or_task_id,
args=args,
kwargs=kwargs,
tags=tags,
result_expiration_time=timedelta(minutes=15),
)
await job_complete_event.wait()
result = await self.get_job_result(job_id)
if result.outcome is JobOutcome.success:
return result.return_value
elif result.outcome is JobOutcome.error:
raise result.exception
elif result.outcome is JobOutcome.missed_start_deadline:
raise JobDeadlineMissed
elif result.outcome is JobOutcome.cancelled:
raise JobCancelled
else:
raise RuntimeError(f"Unknown job outcome: {result.outcome}")
async def stop(self) -> None:
"""
Signal the scheduler that it should stop processing schedules.
This method does not wait for the scheduler to actually stop.
For that, see :meth:`wait_until_stopped`.
"""
if self._state is RunState.started:
self._state = RunState.stopping
self._wakeup_event.set()
async def wait_until_stopped(self) -> None:
"""
Wait until the scheduler is in the "stopped" or "stopping" state.
If the scheduler is already stopped or in the process of stopping, this method
returns immediately. Otherwise, it waits until the scheduler posts the
``SchedulerStopped`` event.
"""
if self._state in (RunState.stopped, RunState.stopping):
return
event = anyio.Event()
with self.event_broker.subscribe(
lambda ev: event.set(), {SchedulerStopped}, one_shot=True
):
await event.wait()
async def _run(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED) -> None:
if self._state is not RunState.stopped:
raise RuntimeError(
f'Cannot start the scheduler when it is in the "{self._state}" '
f"state"
)
self._state = RunState.starting
async with AsyncExitStack() as exit_stack:
self._wakeup_event = anyio.Event()
# Initialize the event broker
await self.event_broker.start()
exit_stack.push_async_exit(
lambda *exc_info: self.event_broker.stop(force=exc_info[0] is not None)
)
# Initialize the data store
await self.data_store.start(self.event_broker)
exit_stack.push_async_exit(
lambda *exc_info: self.data_store.stop(force=exc_info[0] is not None)
)
# Wake up the scheduler if the data store emits a significant schedule event
exit_stack.enter_context(
self.event_broker.subscribe(
self._schedule_added_or_modified, {ScheduleAdded, ScheduleUpdated}
)
)
# Start the built-in worker, if configured to do so
if self.start_worker:
token = current_scheduler.set(self)
exit_stack.callback(current_scheduler.reset, token)
worker = AsyncWorker(
self.data_store, self.event_broker, is_internal=True
)
await exit_stack.enter_async_context(worker)
# Signal that the scheduler has started
self._state = RunState.started
task_status.started()
await self.event_broker.publish_local(SchedulerStarted())
exception: BaseException | None = None
try:
while self._state is RunState.started:
schedules = await self.data_store.acquire_schedules(
self.identity, 100
)
now = datetime.now(timezone.utc)
for schedule in schedules:
# Calculate a next fire time for the schedule, if possible
fire_times = [schedule.next_fire_time]
calculate_next = schedule.trigger.next
while True:
try:
fire_time = calculate_next()
except Exception:
self.logger.exception(
"Error computing next fire time for schedule %r of "
"task %r – removing schedule",
schedule.id,
schedule.task_id,
)
break
# Stop if the calculated fire time is in the future
if fire_time is None or fire_time > now:
schedule.next_fire_time = fire_time
break
# Only keep all the fire times if coalesce policy = "all"
if schedule.coalesce is CoalescePolicy.all:
fire_times.append(fire_time)
elif schedule.coalesce is CoalescePolicy.latest:
fire_times[0] = fire_time
# Add one or more jobs to the job queue
max_jitter = (
schedule.max_jitter.total_seconds()
if schedule.max_jitter
else 0
)
for i, fire_time in enumerate(fire_times):
# Calculate a jitter if max_jitter > 0
jitter = _zero_timedelta
if max_jitter:
if i + 1 < len(fire_times):
next_fire_time = fire_times[i + 1]
else:
next_fire_time = schedule.next_fire_time
if next_fire_time is not None:
# Jitter must never be so high that it would cause a
# fire time to equal or exceed the next fire time
jitter_s = min(
[
max_jitter,
(
next_fire_time
- fire_time
- _microsecond_delta
).total_seconds(),
]
)
jitter = timedelta(
seconds=random.uniform(0, jitter_s)
)
fire_time += jitter
schedule.last_fire_time = fire_time
job = Job(
task_id=schedule.task_id,
args=schedule.args,
kwargs=schedule.kwargs,
schedule_id=schedule.id,
scheduled_fire_time=fire_time,
jitter=jitter,
start_deadline=schedule.next_deadline,
tags=schedule.tags,
)
await self.data_store.add_job(job)
# Update the schedules (and release the scheduler's claim on them)
await self.data_store.release_schedules(self.identity, schedules)
# If we received fewer schedules than the maximum amount, sleep
# until the next schedule is due or the scheduler is explicitly
# woken up
wait_time = None
if len(schedules) < 100:
self._wakeup_deadline = (
await self.data_store.get_next_schedule_run_time()
)
if self._wakeup_deadline:
wait_time = (
self._wakeup_deadline - datetime.now(timezone.utc)
).total_seconds()
self.logger.debug(
"Sleeping %.3f seconds until the next fire time (%s)",
wait_time,
self._wakeup_deadline,
)
else:
self.logger.debug("Waiting for any due schedules to appear")
with move_on_after(wait_time):
await self._wakeup_event.wait()
self._wakeup_event = anyio.Event()
else:
self.logger.debug(
"Processing more schedules on the next iteration"
)
except BaseException as exc:
exception = exc
raise
finally:
self._state = RunState.stopped
if isinstance(exception, Exception):
self.logger.exception("Scheduler crashed")
elif exception:
self.logger.info(
f"Scheduler stopped due to {exception.__class__.__name__}"
)
else:
self.logger.info("Scheduler stopped")
with move_on_after(3, shield=True):
await self.event_broker.publish_local(
SchedulerStopped(exception=exception)
) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/schedulers/async_.py | async_.py |
from __future__ import annotations
from datetime import datetime
from json import dumps, loads
from typing import Any
from uuid import UUID
import attrs
from ..abc import Serializer
from ..marshalling import marshal_date, marshal_object, unmarshal_object
@attrs.define(kw_only=True, eq=False)
class JSONSerializer(Serializer):
"""
Serializes objects using JSON.
Can serialize types not normally JSON serializable, if they implement
``__getstate__()`` and ``__setstate__()``. These objects are serialized into dicts
that contain the necessary information for deserialization in ``magic_key``.
:param magic_key: name of a specially handled dict key that indicates that a dict
contains a serialized instance of an arbitrary type
:param dump_options: keyword arguments passed to :func:`json.dumps`
:param load_options: keyword arguments passed to :func:`json.loads`
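Round-trip sketch (the sample payload is purely illustrative)::

    serializer = JSONSerializer()
    payload = serializer.serialize({"answer": 42})
    assert serializer.deserialize(payload) == {"answer": 42}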
"""
magic_key: str = "_apscheduler_json"
dump_options: dict[str, Any] = attrs.field(factory=dict)
load_options: dict[str, Any] = attrs.field(factory=dict)
def __attrs_post_init__(self):
self.dump_options["default"] = self._default_hook
self.load_options["object_hook"] = self._object_hook
def _default_hook(self, obj):
if isinstance(obj, datetime):
return marshal_date(obj)
elif isinstance(obj, UUID):
return str(obj)
elif hasattr(obj, "__getstate__"):
cls_ref, state = marshal_object(obj)
return {self.magic_key: [cls_ref, state]}
raise TypeError(
f"Object of type {obj.__class__.__name__!r} is not JSON serializable"
)
def _object_hook(self, obj_state: dict[str, Any]):
if self.magic_key in obj_state:
ref, state = obj_state[self.magic_key]
return unmarshal_object(ref, state)
return obj_state
def serialize(self, obj) -> bytes:
return dumps(obj, ensure_ascii=False, **self.dump_options).encode("utf-8")
def deserialize(self, serialized: bytes):
return loads(serialized, **self.load_options) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/serializers/json.py | json.py |
from __future__ import annotations
from datetime import date, datetime, time, timedelta, tzinfo
from typing import Any
import attrs
from .._utils import timezone_repr
from .._validators import as_date, as_timezone, require_state_version
from ..abc import Trigger
from ..marshalling import (
marshal_date,
marshal_timezone,
unmarshal_date,
unmarshal_timezone,
)
@attrs.define(kw_only=True)
class CalendarIntervalTrigger(Trigger):
"""
Runs the task on specified calendar-based intervals, always at the same exact time of
day.
When calculating the next date, the ``years`` and ``months`` parameters are first
added to the previous date while keeping the day of the month constant. This is
repeated until the resulting date is valid. After that, the ``weeks`` and ``days``
parameters are added to that date. Finally, the date is combined with the given time
(hour, minute, second) to form the final datetime.
This means that if the ``days`` or ``weeks`` parameters are not used, the task will
always be executed on the same day of the month at the same wall clock time,
assuming the date and time are valid.
If the resulting datetime is invalid due to a daylight saving forward shift, the
date is discarded and the process moves on to the next date. If instead the datetime
is ambiguous due to a backward DST shift, the earlier of the two resulting datetimes
is used.
If no previous run time is specified when requesting a new run time (like when
starting for the first time or resuming after being paused), ``start_date`` is used
as a reference and the next valid datetime equal to or later than the current time
will be returned. Otherwise, the next valid datetime starting from the previous run
time is returned, even if it's in the past.
.. warning:: Be wary of setting a start date near the end of the month (29. – 31.)
if you have ``months`` specified in your interval, as this will skip the months
when those days do not exist. Likewise, setting the start date on the leap day
        (February 29th) and having ``years`` defined may cause some years to be skipped.
Users are also discouraged from using a time inside the target timezone's DST
switching period (typically around 2 am) since a date could either be skipped or
repeated due to the specified wall clock time either occurring twice or not at
all.
:param years: number of years to wait
:param months: number of months to wait
:param weeks: number of weeks to wait
:param days: number of days to wait
:param hour: hour to run the task at
:param minute: minute to run the task at
:param second: second to run the task at
:param start_date: first date to trigger on (defaults to current date if omitted)
:param end_date: latest possible date to trigger on
:param timezone: time zone to use for calculating the next fire time
"""
years: int = 0
months: int = 0
weeks: int = 0
days: int = 0
hour: int = 0
minute: int = 0
second: int = 0
start_date: date = attrs.field(converter=as_date, factory=date.today)
end_date: date | None = attrs.field(converter=as_date, default=None)
timezone: tzinfo = attrs.field(converter=as_timezone, default="local")
_time: time = attrs.field(init=False, eq=False)
_last_fire_date: date | None = attrs.field(init=False, eq=False, default=None)
def __attrs_post_init__(self) -> None:
self._time = time(self.hour, self.minute, self.second, tzinfo=self.timezone)
if self.years == self.months == self.weeks == self.days == 0:
raise ValueError("interval must be at least 1 day long")
if self.start_date and self.end_date and self.start_date > self.end_date:
raise ValueError("end_date cannot be earlier than start_date")
def next(self) -> datetime | None:
previous_date: date = self._last_fire_date
while True:
if previous_date:
year, month = previous_date.year, previous_date.month
while True:
month += self.months
year += self.years + (month - 1) // 12
month = (month - 1) % 12 + 1
try:
next_date = date(year, month, previous_date.day)
except ValueError:
pass # Nonexistent date
else:
next_date += timedelta(self.days + self.weeks * 7)
break
else:
next_date = self.start_date
# Don't return any date past end_date
if self.end_date and next_date > self.end_date:
return None
# Combine the date with the designated time and normalize the result
timestamp = datetime.combine(next_date, self._time).timestamp()
next_time = datetime.fromtimestamp(timestamp, self.timezone)
# Check if the time is off due to normalization and a forward DST shift
if next_time.time() != self._time:
previous_date = next_time.date()
else:
self._last_fire_date = next_date
return next_time
def __getstate__(self) -> dict[str, Any]:
return {
"version": 1,
"interval": [self.years, self.months, self.weeks, self.days],
"time": [self._time.hour, self._time.minute, self._time.second],
"start_date": marshal_date(self.start_date),
"end_date": marshal_date(self.end_date),
"timezone": marshal_timezone(self.timezone),
"last_fire_date": marshal_date(self._last_fire_date),
}
def __setstate__(self, state: dict[str, Any]) -> None:
require_state_version(self, state, 1)
self.years, self.months, self.weeks, self.days = state["interval"]
self.start_date = unmarshal_date(state["start_date"])
self.end_date = unmarshal_date(state["end_date"])
self.timezone = unmarshal_timezone(state["timezone"])
self._time = time(*state["time"], tzinfo=self.timezone)
self._last_fire_date = unmarshal_date(state["last_fire_date"])
def __repr__(self) -> str:
fields = []
for field in "years", "months", "weeks", "days":
value = getattr(self, field)
if value > 0:
fields.append(f"{field}={value}")
fields.append(f"time={self._time.isoformat()!r}")
fields.append(f"start_date='{self.start_date}'")
if self.end_date:
fields.append(f"end_date='{self.end_date}'")
fields.append(f"timezone={timezone_repr(self.timezone)!r}")
return f'{self.__class__.__name__}({", ".join(fields)})' | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/triggers/calendarinterval.py | calendarinterval.py |
from __future__ import annotations
from datetime import datetime, timedelta
from typing import Any
import attrs
from .._validators import as_aware_datetime, require_state_version
from ..abc import Trigger
from ..marshalling import marshal_date, unmarshal_date
@attrs.define(kw_only=True)
class IntervalTrigger(Trigger):
"""
Triggers on specified intervals.
The first trigger time is on ``start_time`` which is the moment the trigger was
created unless specifically overridden. If ``end_time`` is specified, the last
trigger time will be at or before that time. If no ``end_time`` has been given, the
trigger will produce new trigger times as long as the resulting datetimes are valid
datetimes in Python.
:param weeks: number of weeks to wait
:param days: number of days to wait
:param hours: number of hours to wait
:param minutes: number of minutes to wait
:param seconds: number of seconds to wait
:param microseconds: number of microseconds to wait
:param start_time: first trigger date/time (defaults to current date/time if
omitted)
:param end_time: latest possible date/time to trigger on
"""
weeks: float = 0
days: float = 0
hours: float = 0
minutes: float = 0
seconds: float = 0
microseconds: float = 0
start_time: datetime = attrs.field(
converter=as_aware_datetime, factory=datetime.now
)
end_time: datetime | None = attrs.field(converter=as_aware_datetime, default=None)
_interval: timedelta = attrs.field(init=False, eq=False, repr=False)
_last_fire_time: datetime | None = attrs.field(init=False, eq=False, default=None)
def __attrs_post_init__(self) -> None:
self._interval = timedelta(
weeks=self.weeks,
days=self.days,
hours=self.hours,
minutes=self.minutes,
seconds=self.seconds,
microseconds=self.microseconds,
)
if self._interval.total_seconds() <= 0:
raise ValueError("The time interval must be positive")
if self.end_time and self.end_time < self.start_time:
raise ValueError("end_time cannot be earlier than start_time")
def next(self) -> datetime | None:
if self._last_fire_time is None:
self._last_fire_time = self.start_time
else:
self._last_fire_time += self._interval
if self.end_time is None or self._last_fire_time <= self.end_time:
return self._last_fire_time
else:
return None
def __getstate__(self) -> dict[str, Any]:
return {
"version": 1,
"interval": [
self.weeks,
self.days,
self.hours,
self.minutes,
self.seconds,
self.microseconds,
],
"start_time": marshal_date(self.start_time),
"end_time": marshal_date(self.end_time),
"last_fire_time": marshal_date(self._last_fire_time),
}
def __setstate__(self, state: dict[str, Any]) -> None:
require_state_version(self, state, 1)
(
self.weeks,
self.days,
self.hours,
self.minutes,
self.seconds,
self.microseconds,
) = state["interval"]
self.start_time = unmarshal_date(state["start_time"])
self.end_time = unmarshal_date(state["end_time"])
self._last_fire_time = unmarshal_date(state["last_fire_time"])
self._interval = timedelta(
weeks=self.weeks,
days=self.days,
hours=self.hours,
minutes=self.minutes,
seconds=self.seconds,
microseconds=self.microseconds,
)
def __repr__(self) -> str:
fields = []
for field in "weeks", "days", "hours", "minutes", "seconds", "microseconds":
value = getattr(self, field)
if value > 0:
fields.append(f"{field}={value}")
fields.append(f"start_time='{self.start_time}'")
if self.end_time:
fields.append(f"end_time='{self.end_time}'")
return f'{self.__class__.__name__}({", ".join(fields)})' | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/triggers/interval.py | interval.py |
from __future__ import annotations
from abc import abstractmethod
from datetime import datetime, timedelta
from typing import Any
import attrs
from .._exceptions import MaxIterationsReached
from .._validators import as_timedelta, require_state_version
from ..abc import Trigger
from ..marshalling import marshal_object, unmarshal_object
@attrs.define
class BaseCombiningTrigger(Trigger):
triggers: list[Trigger]
_next_fire_times: list[datetime | None] = attrs.field(
init=False, eq=False, factory=list
)
def __getstate__(self) -> dict[str, Any]:
return {
"version": 1,
"triggers": [marshal_object(trigger) for trigger in self.triggers],
"next_fire_times": self._next_fire_times,
}
@abstractmethod
def __setstate__(self, state: dict[str, Any]) -> None:
self.triggers = [
unmarshal_object(*trigger_state) for trigger_state in state["triggers"]
]
self._next_fire_times = state["next_fire_times"]
@attrs.define
class AndTrigger(BaseCombiningTrigger):
"""
Fires on times produced by the enclosed triggers whenever the fire times are within
the given threshold.
If the produced fire times are not within the given threshold of each other, the
trigger(s) that produced the earliest fire time will be asked for their next fire
time and the iteration is restarted. If instead all the triggers agree on a fire
time, all the triggers are asked for their next fire times and the earliest of the
previously produced fire times will be returned.
    This trigger will be finished when any of the enclosed triggers has finished.
:param triggers: triggers to combine
:param threshold: maximum time difference between the next fire times of the
triggers in order for the earliest of them to be returned from :meth:`next` (in
seconds, or as timedelta)
:param max_iterations: maximum number of iterations of fire time calculations before
giving up
"""
threshold: timedelta = attrs.field(converter=as_timedelta, default=1)
max_iterations: int | None = 10000
def next(self) -> datetime | None:
if not self._next_fire_times:
# Fill out the fire times on the first run
self._next_fire_times = [t.next() for t in self.triggers]
for _ in range(self.max_iterations):
# Find the earliest and latest fire times
earliest_fire_time: datetime | None = None
latest_fire_time: datetime | None = None
for fire_time in self._next_fire_times:
# If any of the fire times is None, this trigger is finished
if fire_time is None:
return None
if earliest_fire_time is None or earliest_fire_time > fire_time:
earliest_fire_time = fire_time
if latest_fire_time is None or latest_fire_time < fire_time:
latest_fire_time = fire_time
# Replace all the fire times that were within the threshold
for i, _trigger in enumerate(self.triggers):
if self._next_fire_times[i] - earliest_fire_time <= self.threshold:
self._next_fire_times[i] = self.triggers[i].next()
# If all the fire times were within the threshold, return the earliest one
if latest_fire_time - earliest_fire_time <= self.threshold:
self._next_fire_times = [t.next() for t in self.triggers]
return earliest_fire_time
else:
raise MaxIterationsReached
def __getstate__(self) -> dict[str, Any]:
state = super().__getstate__()
state["threshold"] = self.threshold.total_seconds()
state["max_iterations"] = self.max_iterations
return state
def __setstate__(self, state: dict[str, Any]) -> None:
require_state_version(self, state, 1)
super().__setstate__(state)
self.threshold = timedelta(seconds=state["threshold"])
self.max_iterations = state["max_iterations"]
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}({self.triggers}, "
f"threshold={self.threshold.total_seconds()}, "
f"max_iterations={self.max_iterations})"
)
@attrs.define
class OrTrigger(BaseCombiningTrigger):
"""
Fires on every fire time of every trigger in chronological order.
If two or more triggers produce the same fire time, it will only be used once.
This trigger will be finished when none of the enclosed triggers can produce any new
fire times.
:param triggers: triggers to combine
"""
def next(self) -> datetime | None:
# Fill out the fire times on the first run
if not self._next_fire_times:
self._next_fire_times = [t.next() for t in self.triggers]
# Find out the earliest of the fire times
earliest_time: datetime | None = min(
(fire_time for fire_time in self._next_fire_times if fire_time is not None),
default=None,
)
if earliest_time is not None:
# Generate new fire times for the trigger(s) that generated the earliest
# fire time
for i, fire_time in enumerate(self._next_fire_times):
if fire_time == earliest_time:
self._next_fire_times[i] = self.triggers[i].next()
return earliest_time
def __setstate__(self, state: dict[str, Any]) -> None:
require_state_version(self, state, 1)
super().__setstate__(state)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.triggers})" | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/triggers/combining.py | combining.py |
from __future__ import annotations
import re
from calendar import monthrange
from datetime import datetime
from ..._validators import as_int
WEEKDAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
MONTHS = [
"jan",
"feb",
"mar",
"apr",
"may",
"jun",
"jul",
"aug",
"sep",
"oct",
"nov",
"dec",
]
def get_weekday_index(weekday: str) -> int:
try:
return WEEKDAYS.index(weekday.lower())
except ValueError:
raise ValueError(f"Invalid weekday name {weekday!r}") from None
class AllExpression:
__slots__ = "step"
value_re = re.compile(r"\*(?:/(?P<step>\d+))?$")
def __init__(self, step: str | int | None = None):
self.step = as_int(step)
if self.step == 0:
raise ValueError("Step must be higher than 0")
def validate_range(self, field_name: str, min_value: int, max_value: int) -> None:
value_range = max_value - min_value
if self.step and self.step > value_range:
raise ValueError(
f"the step value ({self.step}) is higher than the total range of the "
f"expression ({value_range})"
)
def get_next_value(self, dateval: datetime, field) -> int | None:
start = field.get_value(dateval)
minval = field.get_min(dateval)
maxval = field.get_max(dateval)
start = max(start, minval)
if not self.step:
nextval = start
else:
distance_to_next = (self.step - (start - minval)) % self.step
nextval = start + distance_to_next
return nextval if nextval <= maxval else None
def __str__(self):
return f"*/{self.step}" if self.step else "*"
class RangeExpression(AllExpression):
__slots__ = "first", "last"
value_re = re.compile(r"(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$")
def __init__(
self,
first: str | int,
last: str | int | None = None,
step: str | int | None = None,
):
super().__init__(step)
self.first = as_int(first)
self.last = as_int(last)
if self.last is None and self.step is None:
self.last = self.first
if self.last is not None and self.first > self.last:
raise ValueError(
"The minimum value in a range must not be higher than the maximum"
)
def validate_range(self, field_name: str, min_value: int, max_value: int) -> None:
super().validate_range(field_name, min_value, max_value)
if self.first < min_value:
raise ValueError(
f"the first value ({self.first}) is lower than the minimum value "
f"({min_value})"
)
if self.last is not None and self.last > max_value:
raise ValueError(
f"the last value ({self.last}) is higher than the maximum value "
f"({max_value})"
)
value_range = (self.last or max_value) - self.first
if self.step and self.step > value_range:
raise ValueError(
f"the step value ({self.step}) is higher than the total range of the "
f"expression ({value_range})"
)
def get_next_value(self, date, field):
startval = field.get_value(date)
minval = field.get_min(date)
maxval = field.get_max(date)
# Apply range limits
minval = max(minval, self.first)
maxval = min(maxval, self.last) if self.last is not None else maxval
nextval = max(minval, startval)
# Apply the step if defined
if self.step:
distance_to_next = (self.step - (nextval - minval)) % self.step
nextval += distance_to_next
return nextval if nextval <= maxval else None
def __str__(self):
if self.last != self.first and self.last is not None:
rangeval = f"{self.first}-{self.last}"
else:
rangeval = str(self.first)
if self.step:
return f"{rangeval}/{self.step}"
return rangeval
class MonthRangeExpression(RangeExpression):
__slots__ = ()
value_re = re.compile(r"(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?", re.IGNORECASE)
def __init__(self, first, last=None):
try:
first_num = MONTHS.index(first.lower()) + 1
except ValueError:
raise ValueError(f"Invalid month name {first!r}") from None
if last:
try:
last_num = MONTHS.index(last.lower()) + 1
except ValueError:
raise ValueError(f"Invalid month name {last!r}") from None
else:
last_num = None
super().__init__(first_num, last_num)
def __str__(self):
if self.last != self.first and self.last is not None:
return f"{MONTHS[self.first - 1]}-{MONTHS[self.last - 1]}"
return MONTHS[self.first - 1]
class WeekdayRangeExpression(RangeExpression):
__slots__ = ()
value_re = re.compile(r"(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?", re.IGNORECASE)
def __init__(self, first: str, last: str | None = None):
first_num = get_weekday_index(first)
last_num = get_weekday_index(last) if last else None
super().__init__(first_num, last_num)
def __str__(self):
if self.last != self.first and self.last is not None:
return f"{WEEKDAYS[self.first]}-{WEEKDAYS[self.last]}"
return WEEKDAYS[self.first]
class WeekdayPositionExpression(AllExpression):
__slots__ = "option_num", "weekday"
options = ["1st", "2nd", "3rd", "4th", "5th", "last"]
value_re = re.compile(
r"(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))" % "|".join(options),
re.IGNORECASE,
)
def __init__(self, option_name: str, weekday_name: str):
super().__init__(None)
self.option_num = self.options.index(option_name.lower())
try:
self.weekday = WEEKDAYS.index(weekday_name.lower())
except ValueError:
raise ValueError(f"Invalid weekday name {weekday_name!r}") from None
def get_next_value(self, dateval: datetime, field) -> int | None:
# Figure out the weekday of the month's first day and the number of days in that
# month
first_day_wday, last_day = monthrange(dateval.year, dateval.month)
# Calculate which day of the month is the first of the target weekdays
first_hit_day = self.weekday - first_day_wday + 1
if first_hit_day <= 0:
first_hit_day += 7
# Calculate what day of the month the target weekday would be
if self.option_num < 5:
target_day = first_hit_day + self.option_num * 7
else:
target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7
if last_day >= target_day >= dateval.day:
return target_day
else:
return None
def __str__(self):
return f"{self.options[self.option_num]} {WEEKDAYS[self.weekday]}"
class LastDayOfMonthExpression(AllExpression):
__slots__ = ()
value_re = re.compile(r"last", re.IGNORECASE)
def __init__(self):
super().__init__(None)
def get_next_value(self, dateval: datetime, field):
return monthrange(dateval.year, dateval.month)[1]
def __str__(self):
return "last" | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/triggers/cron/expressions.py | expressions.py |
from __future__ import annotations
from datetime import datetime, timedelta, tzinfo
from typing import Any, ClassVar, Sequence
import attrs
from tzlocal import get_localzone
from ..._utils import timezone_repr
from ..._validators import as_aware_datetime, as_timezone, require_state_version
from ...abc import Trigger
from ...marshalling import (
marshal_date,
marshal_timezone,
unmarshal_date,
unmarshal_timezone,
)
from .fields import (
DEFAULT_VALUES,
BaseField,
DayOfMonthField,
DayOfWeekField,
MonthField,
WeekField,
)
@attrs.define(kw_only=True)
class CronTrigger(Trigger):
"""
Triggers when current time matches all specified time constraints, similarly to how
the UNIX cron scheduler works.
:param year: 4-digit year
:param month: month (1-12)
    :param day: day of the month (1-31)
:param week: ISO week (1-53)
:param day_of_week: number or name of weekday (0-7 or sun,mon,tue,wed,thu,fri,sat,
sun)
:param hour: hour (0-23)
:param minute: minute (0-59)
:param second: second (0-59)
:param start_time: earliest possible date/time to trigger on (defaults to current
time)
:param end_time: latest possible date/time to trigger on
:param timezone: time zone to use for the date/time calculations
(defaults to the local timezone)
.. note:: The first weekday is always **monday**.
"""
FIELDS_MAP: ClassVar[list[tuple[str, type[BaseField]]]] = [
("year", BaseField),
("month", MonthField),
("day", DayOfMonthField),
("week", WeekField),
("day_of_week", DayOfWeekField),
("hour", BaseField),
("minute", BaseField),
("second", BaseField),
]
year: int | str | None = None
month: int | str | None = None
day: int | str | None = None
week: int | str | None = None
day_of_week: int | str | None = None
hour: int | str | None = None
minute: int | str | None = None
second: int | str | None = None
start_time: datetime = attrs.field(
converter=as_aware_datetime, factory=datetime.now
)
end_time: datetime | None = None
timezone: tzinfo | str = attrs.field(converter=as_timezone, factory=get_localzone)
_fields: list[BaseField] = attrs.field(init=False, eq=False, factory=list)
_last_fire_time: datetime | None = attrs.field(init=False, eq=False, default=None)
def __attrs_post_init__(self) -> None:
self._set_fields(
[
self.year,
self.month,
self.day,
self.week,
self.day_of_week,
self.hour,
self.minute,
self.second,
]
)
self._last_fire_time: datetime | None = None
def _set_fields(self, values: Sequence[int | str | None]) -> None:
self._fields = []
assigned_values = {
field_name: value
for (field_name, _), value in zip(self.FIELDS_MAP, values)
if value is not None
}
for field_name, field_class in self.FIELDS_MAP:
exprs = assigned_values.pop(field_name, None)
if exprs is None:
exprs = "*" if assigned_values else DEFAULT_VALUES[field_name]
field = field_class(field_name, exprs)
self._fields.append(field)
@classmethod
def from_crontab(cls, expr: str, timezone: str | tzinfo = "local") -> CronTrigger:
"""
Create a :class:`~CronTrigger` from a standard crontab expression.
See https://en.wikipedia.org/wiki/Cron for more information on the format
accepted here.
:param expr: minute, hour, day of month, month, day of week
:param timezone: time zone to use for the date/time calculations
(defaults to local timezone if omitted)
"""
values = expr.split()
if len(values) != 5:
raise ValueError(f"Wrong number of fields; got {len(values)}, expected 5")
return cls(
minute=values[0],
hour=values[1],
day=values[2],
month=values[3],
day_of_week=values[4],
timezone=timezone,
)
def _increment_field_value(
self, dateval: datetime, fieldnum: int
) -> tuple[datetime, int]:
"""
Increments the designated field and resets all less significant fields to their
minimum values.
:return: a tuple containing the new date, and the number of the field that was
actually incremented
"""
values = {}
i = 0
while i < len(self._fields):
field = self._fields[i]
if not field.real:
if i == fieldnum:
fieldnum -= 1
i -= 1
else:
i += 1
continue
if i < fieldnum:
values[field.name] = field.get_value(dateval)
i += 1
elif i > fieldnum:
values[field.name] = field.get_min(dateval)
i += 1
else:
value = field.get_value(dateval)
maxval = field.get_max(dateval)
if value == maxval:
fieldnum -= 1
i -= 1
else:
values[field.name] = value + 1
i += 1
difference = datetime(**values) - dateval.replace(tzinfo=None)
dateval = datetime.fromtimestamp(
dateval.timestamp() + difference.total_seconds(), self.timezone
)
return dateval, fieldnum
def _set_field_value(
self, dateval: datetime, fieldnum: int, new_value: int
) -> datetime:
values = {}
for i, field in enumerate(self._fields):
if field.real:
if i < fieldnum:
values[field.name] = field.get_value(dateval)
elif i > fieldnum:
values[field.name] = field.get_min(dateval)
else:
values[field.name] = new_value
return datetime(**values, tzinfo=self.timezone)
def next(self) -> datetime | None:
if self._last_fire_time:
start_time = self._last_fire_time + timedelta(microseconds=1)
else:
start_time = self.start_time
fieldnum = 0
next_time = datetime_ceil(start_time).astimezone(self.timezone)
while 0 <= fieldnum < len(self._fields):
field = self._fields[fieldnum]
curr_value = field.get_value(next_time)
next_value = field.get_next_value(next_time)
if next_value is None:
# No valid value was found
next_time, fieldnum = self._increment_field_value(
next_time, fieldnum - 1
)
elif next_value > curr_value:
                # A valid value higher than the starting value was found
if field.real:
next_time = self._set_field_value(next_time, fieldnum, next_value)
fieldnum += 1
else:
next_time, fieldnum = self._increment_field_value(
next_time, fieldnum
)
else:
# A valid value was found, no changes necessary
fieldnum += 1
# Return if the date has rolled past the end date
if self.end_time and next_time > self.end_time:
return None
if fieldnum >= 0:
self._last_fire_time = next_time
return next_time
def __getstate__(self) -> dict[str, Any]:
return {
"version": 1,
"timezone": marshal_timezone(self.timezone),
"fields": [str(f) for f in self._fields],
"start_time": marshal_date(self.start_time),
"end_time": marshal_date(self.end_time),
"last_fire_time": marshal_date(self._last_fire_time),
}
def __setstate__(self, state: dict[str, Any]) -> None:
require_state_version(self, state, 1)
self.timezone = unmarshal_timezone(state["timezone"])
self.start_time = unmarshal_date(state["start_time"])
self.end_time = unmarshal_date(state["end_time"])
self._last_fire_time = unmarshal_date(state["last_fire_time"])
self._set_fields(state["fields"])
def __repr__(self) -> str:
fields = [f"{field.name}={str(field)!r}" for field in self._fields]
fields.append(f"start_time={self.start_time.isoformat()!r}")
if self.end_time:
fields.append(f"end_time={self.end_time.isoformat()!r}")
fields.append(f"timezone={timezone_repr(self.timezone)!r}")
return f'CronTrigger({", ".join(fields)})'
def datetime_ceil(dateval: datetime) -> datetime:
"""Round the given datetime object upwards."""
if dateval.microsecond > 0:
return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond)
return dateval | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/triggers/cron/__init__.py | __init__.py |
from __future__ import annotations
import re
from calendar import monthrange
from datetime import datetime
from typing import Any, ClassVar, Sequence
from .expressions import (
WEEKDAYS,
AllExpression,
LastDayOfMonthExpression,
MonthRangeExpression,
RangeExpression,
WeekdayPositionExpression,
WeekdayRangeExpression,
get_weekday_index,
)
MIN_VALUES = {
"year": 1970,
"month": 1,
"day": 1,
"week": 1,
"day_of_week": 0,
"hour": 0,
"minute": 0,
"second": 0,
}
MAX_VALUES = {
"year": 9999,
"month": 12,
"day": 31,
"week": 53,
"day_of_week": 7,
"hour": 23,
"minute": 59,
"second": 59,
}
DEFAULT_VALUES = {
"year": "*",
"month": 1,
"day": 1,
"week": "*",
"day_of_week": "*",
"hour": 0,
"minute": 0,
"second": 0,
}
SEPARATOR = re.compile(" *, *")
class BaseField:
__slots__ = "name", "expressions"
real: ClassVar[bool] = True
compilers: ClassVar[Any] = (AllExpression, RangeExpression)
def __init_subclass__(cls, real: bool = True, extra_compilers: Sequence = ()):
cls.real = real
if extra_compilers:
cls.compilers += extra_compilers
def __init__(self, name: str, exprs: int | str):
self.name = name
self.expressions: list = []
for expr in SEPARATOR.split(str(exprs).strip()):
self.append_expression(expr)
def get_min(self, dateval: datetime) -> int:
return MIN_VALUES[self.name]
def get_max(self, dateval: datetime) -> int:
return MAX_VALUES[self.name]
def get_value(self, dateval: datetime) -> int:
return getattr(dateval, self.name)
def get_next_value(self, dateval: datetime) -> int | None:
smallest = None
for expr in self.expressions:
value = expr.get_next_value(dateval, self)
if smallest is None or (value is not None and value < smallest):
smallest = value
return smallest
def append_expression(self, expr: str) -> None:
for compiler in self.compilers:
match = compiler.value_re.match(expr)
if match:
compiled_expr = compiler(**match.groupdict())
try:
compiled_expr.validate_range(
self.name, MIN_VALUES[self.name], MAX_VALUES[self.name]
)
except ValueError as exc:
raise ValueError(
f"Error validating expression {expr!r}: {exc}"
) from exc
self.expressions.append(compiled_expr)
return
raise ValueError(f"Unrecognized expression {expr!r} for field {self.name!r}")
def __str__(self):
expr_strings = (str(e) for e in self.expressions)
return ",".join(expr_strings)
class WeekField(BaseField, real=False):
__slots__ = ()
def get_value(self, dateval: datetime) -> int:
return dateval.isocalendar()[1]
class DayOfMonthField(
BaseField, extra_compilers=(WeekdayPositionExpression, LastDayOfMonthExpression)
):
__slots__ = ()
def get_max(self, dateval: datetime) -> int:
return monthrange(dateval.year, dateval.month)[1]
class DayOfWeekField(BaseField, real=False, extra_compilers=(WeekdayRangeExpression,)):
__slots__ = ()
def append_expression(self, expr: str) -> None:
# Convert numeric weekday expressions into textual ones
match = RangeExpression.value_re.match(expr)
if match:
groups = match.groups()
first = int(groups[0]) - 1
first = 6 if first < 0 else first
if groups[1]:
last = int(groups[1]) - 1
last = 6 if last < 0 else last
else:
last = first
expr = f"{WEEKDAYS[first]}-{WEEKDAYS[last]}"
# For expressions like Sun-Tue or Sat-Mon, add two expressions that together
# cover the expected weekdays
match = WeekdayRangeExpression.value_re.match(expr)
if match and match.groups()[1]:
groups = match.groups()
first_index = get_weekday_index(groups[0])
last_index = get_weekday_index(groups[1])
if first_index > last_index:
super().append_expression(f"{WEEKDAYS[0]}-{groups[1]}")
super().append_expression(f"{groups[0]}-{WEEKDAYS[-1]}")
return
super().append_expression(expr)
def get_value(self, dateval: datetime) -> int:
return dateval.weekday()
class MonthField(BaseField, extra_compilers=(MonthRangeExpression,)):
__slots__ = () | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/triggers/cron/fields.py | fields.py |
from __future__ import annotations
import atexit
import os
import platform
import threading
from concurrent.futures import Future, ThreadPoolExecutor
from contextlib import ExitStack
from contextvars import copy_context
from datetime import datetime, timezone
from logging import Logger, getLogger
from types import TracebackType
from typing import Callable
from uuid import UUID
import attrs
from .. import JobReleased
from .._context import current_job, current_worker
from .._enums import JobOutcome, RunState
from .._events import JobAdded, WorkerStarted, WorkerStopped
from .._structures import Job, JobInfo, JobResult
from .._validators import positive_integer
from ..abc import DataStore, EventBroker
from ..eventbrokers.local import LocalEventBroker
@attrs.define(eq=False)
class Worker:
"""Runs jobs locally in a thread pool."""
data_store: DataStore
event_broker: EventBroker = attrs.field(factory=LocalEventBroker)
max_concurrent_jobs: int = attrs.field(
kw_only=True, validator=positive_integer, default=20
)
identity: str = attrs.field(kw_only=True, default=None)
logger: Logger | None = attrs.field(kw_only=True, default=getLogger(__name__))
# True if a scheduler owns this worker
_is_internal: bool = attrs.field(kw_only=True, default=False)
_state: RunState = attrs.field(init=False, default=RunState.stopped)
_thread: threading.Thread | None = attrs.field(init=False, default=None)
_wakeup_event: threading.Event = attrs.field(init=False, factory=threading.Event)
_executor: ThreadPoolExecutor = attrs.field(init=False)
_acquired_jobs: set[Job] = attrs.field(init=False, factory=set)
_running_jobs: set[UUID] = attrs.field(init=False, factory=set)
def __attrs_post_init__(self) -> None:
if not self.identity:
self.identity = f"{platform.node()}-{os.getpid()}-{id(self)}"
def __enter__(self) -> Worker:
self.start_in_background()
return self
def __exit__(
self,
exc_type: type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> None:
self.stop()
@property
def state(self) -> RunState:
"""The current running state of the worker."""
return self._state
def start_in_background(self) -> None:
"""
Launch the worker in a new thread.
This method registers an :mod:`atexit` hook to shut down the worker and wait
for the thread to finish.
"""
start_future: Future[None] = Future()
self._thread = threading.Thread(
target=copy_context().run, args=[self._run, start_future], daemon=True
)
self._thread.start()
try:
start_future.result()
except BaseException:
self._thread = None
raise
atexit.register(self.stop)
def stop(self) -> None:
"""
Signal the worker that it should stop running jobs.
This method does not wait for the worker to actually stop.
"""
atexit.unregister(self.stop)
if self._state is RunState.started:
self._state = RunState.stopping
self._wakeup_event.set()
if threading.current_thread() != self._thread:
self._thread.join()
self._thread = None
def run_until_stopped(self) -> None:
"""
Run the worker until it is explicitly stopped.
This method will only return if :meth:`stop` is called.
"""
self._run(None)
def _run(self, start_future: Future[None] | None) -> None:
with ExitStack() as exit_stack:
try:
if self._state is not RunState.stopped:
raise RuntimeError(
f'Cannot start the worker when it is in the "{self._state}" '
f"state"
)
if not self._is_internal:
# Initialize the event broker
self.event_broker.start()
exit_stack.push(
lambda *exc_info: self.event_broker.stop(
force=exc_info[0] is not None
)
)
# Initialize the data store
self.data_store.start(self.event_broker)
exit_stack.push(
lambda *exc_info: self.data_store.stop(
force=exc_info[0] is not None
)
)
# Set the current worker
token = current_worker.set(self)
exit_stack.callback(current_worker.reset, token)
# Wake up the worker if the data store emits a significant job event
exit_stack.enter_context(
self.event_broker.subscribe(
lambda event: self._wakeup_event.set(), {JobAdded}
)
)
# Initialize the thread pool
executor = ThreadPoolExecutor(max_workers=self.max_concurrent_jobs)
exit_stack.enter_context(executor)
# Signal that the worker has started
self._state = RunState.started
self.event_broker.publish_local(WorkerStarted())
except BaseException as exc:
if start_future:
start_future.set_exception(exc)
return
else:
raise
else:
if start_future:
start_future.set_result(None)
try:
while self._state is RunState.started:
available_slots = self.max_concurrent_jobs - len(self._running_jobs)
if available_slots:
jobs = self.data_store.acquire_jobs(
self.identity, available_slots
)
for job in jobs:
task = self.data_store.get_task(job.task_id)
self._running_jobs.add(job.id)
executor.submit(
copy_context().run, self._run_job, job, task.func
)
self._wakeup_event.wait()
self._wakeup_event = threading.Event()
except BaseException as exc:
self._state = RunState.stopped
if isinstance(exc, Exception):
self.logger.exception("Worker crashed")
else:
self.logger.info(f"Worker stopped due to {exc.__class__.__name__}")
self.event_broker.publish_local(WorkerStopped(exception=exc))
else:
self._state = RunState.stopped
self.logger.info("Worker stopped")
self.event_broker.publish_local(WorkerStopped())
def _run_job(self, job: Job, func: Callable) -> None:
try:
# Check if the job started before the deadline
start_time = datetime.now(timezone.utc)
if job.start_deadline is not None and start_time > job.start_deadline:
result = JobResult.from_job(
job, JobOutcome.missed_start_deadline, finished_at=start_time
)
self.event_broker.publish(
JobReleased.from_result(result, self.identity)
)
self.data_store.release_job(self.identity, job.task_id, result)
return
token = current_job.set(JobInfo.from_job(job))
try:
retval = func(*job.args, **job.kwargs)
except BaseException as exc:
if isinstance(exc, Exception):
self.logger.exception("Job %s raised an exception", job.id)
else:
self.logger.error(
"Job %s was aborted due to %s", job.id, exc.__class__.__name__
)
result = JobResult.from_job(
job,
JobOutcome.error,
exception=exc,
)
self.data_store.release_job(
self.identity,
job.task_id,
result,
)
self.event_broker.publish(
JobReleased.from_result(result, self.identity)
)
if not isinstance(exc, Exception):
raise
else:
self.logger.info("Job %s completed successfully", job.id)
result = JobResult.from_job(
job,
JobOutcome.success,
return_value=retval,
)
self.data_store.release_job(self.identity, job.task_id, result)
self.event_broker.publish(
JobReleased.from_result(result, self.identity)
)
finally:
current_job.reset(token)
finally:
self._running_jobs.remove(job.id) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/workers/sync.py | sync.py |
from __future__ import annotations
import os
import platform
from contextlib import AsyncExitStack
from datetime import datetime, timezone
from inspect import isawaitable
from logging import Logger, getLogger
from types import TracebackType
from typing import Callable
from uuid import UUID
import anyio
import attrs
from anyio import (
TASK_STATUS_IGNORED,
create_task_group,
get_cancelled_exc_class,
move_on_after,
)
from anyio.abc import CancelScope, TaskGroup
from .._context import current_job, current_worker
from .._converters import as_async_datastore, as_async_eventbroker
from .._enums import JobOutcome, RunState
from .._events import JobAdded, JobReleased, WorkerStarted, WorkerStopped
from .._structures import Job, JobInfo, JobResult
from .._validators import positive_integer
from ..abc import AsyncDataStore, AsyncEventBroker
from ..eventbrokers.async_local import LocalAsyncEventBroker
@attrs.define(eq=False)
class AsyncWorker:
"""Runs jobs locally in a task group."""
data_store: AsyncDataStore = attrs.field(converter=as_async_datastore)
event_broker: AsyncEventBroker = attrs.field(
converter=as_async_eventbroker, factory=LocalAsyncEventBroker
)
max_concurrent_jobs: int = attrs.field(
kw_only=True, validator=positive_integer, default=100
)
identity: str = attrs.field(kw_only=True, default=None)
logger: Logger | None = attrs.field(kw_only=True, default=getLogger(__name__))
# True if a scheduler owns this worker
_is_internal: bool = attrs.field(kw_only=True, default=False)
_state: RunState = attrs.field(init=False, default=RunState.stopped)
_wakeup_event: anyio.Event = attrs.field(init=False)
_task_group: TaskGroup = attrs.field(init=False)
_acquired_jobs: set[Job] = attrs.field(init=False, factory=set)
_running_jobs: set[UUID] = attrs.field(init=False, factory=set)
def __attrs_post_init__(self) -> None:
if not self.identity:
self.identity = f"{platform.node()}-{os.getpid()}-{id(self)}"
async def __aenter__(self) -> AsyncWorker:
self._task_group = create_task_group()
await self._task_group.__aenter__()
await self._task_group.start(self.run_until_stopped)
return self
async def __aexit__(
self,
exc_type: type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> None:
self._state = RunState.stopping
self._wakeup_event.set()
await self._task_group.__aexit__(exc_type, exc_val, exc_tb)
del self._task_group
del self._wakeup_event
@property
def state(self) -> RunState:
"""The current running state of the worker."""
return self._state
async def run_until_stopped(self, *, task_status=TASK_STATUS_IGNORED) -> None:
"""Run the worker until it is explicitly stopped."""
if self._state is not RunState.stopped:
raise RuntimeError(
f'Cannot start the worker when it is in the "{self._state}" ' f"state"
)
self._state = RunState.starting
self._wakeup_event = anyio.Event()
async with AsyncExitStack() as exit_stack:
if not self._is_internal:
# Initialize the event broker
await self.event_broker.start()
exit_stack.push_async_exit(
lambda *exc_info: self.event_broker.stop(
force=exc_info[0] is not None
)
)
# Initialize the data store
await self.data_store.start(self.event_broker)
exit_stack.push_async_exit(
lambda *exc_info: self.data_store.stop(
force=exc_info[0] is not None
)
)
# Set the current worker
token = current_worker.set(self)
exit_stack.callback(current_worker.reset, token)
# Wake up the worker if the data store emits a significant job event
self.event_broker.subscribe(
lambda event: self._wakeup_event.set(), {JobAdded}
)
# Signal that the worker has started
self._state = RunState.started
task_status.started()
exception: BaseException | None = None
try:
await self.event_broker.publish_local(WorkerStarted())
async with create_task_group() as tg:
while self._state is RunState.started:
limit = self.max_concurrent_jobs - len(self._running_jobs)
jobs = await self.data_store.acquire_jobs(self.identity, limit)
for job in jobs:
task = await self.data_store.get_task(job.task_id)
self._running_jobs.add(job.id)
tg.start_soon(self._run_job, job, task.func)
await self._wakeup_event.wait()
self._wakeup_event = anyio.Event()
except get_cancelled_exc_class():
pass
except BaseException as exc:
self.logger.exception("Worker crashed")
exception = exc
else:
self.logger.info("Worker stopped")
finally:
self._state = RunState.stopped
with move_on_after(3, shield=True):
await self.event_broker.publish_local(
WorkerStopped(exception=exception)
)
async def stop(self, *, force: bool = False) -> None:
"""
Signal the worker that it should stop running jobs.
This method does not wait for the worker to actually stop.
"""
if self._state in (RunState.starting, RunState.started):
self._state = RunState.stopping
event = anyio.Event()
self.event_broker.subscribe(
lambda ev: event.set(), {WorkerStopped}, one_shot=True
)
if force:
self._task_group.cancel_scope.cancel()
else:
self._wakeup_event.set()
await event.wait()
async def _run_job(self, job: Job, func: Callable) -> None:
try:
# Check if the job started before the deadline
start_time = datetime.now(timezone.utc)
if job.start_deadline is not None and start_time > job.start_deadline:
result = JobResult.from_job(
job,
outcome=JobOutcome.missed_start_deadline,
finished_at=start_time,
)
await self.data_store.release_job(self.identity, job.task_id, result)
await self.event_broker.publish(
JobReleased.from_result(result, self.identity)
)
return
token = current_job.set(JobInfo.from_job(job))
try:
retval = func(*job.args, **job.kwargs)
if isawaitable(retval):
retval = await retval
except get_cancelled_exc_class():
self.logger.info("Job %s was cancelled", job.id)
with CancelScope(shield=True):
result = JobResult.from_job(
job,
outcome=JobOutcome.cancelled,
)
await self.data_store.release_job(
self.identity, job.task_id, result
)
await self.event_broker.publish(
JobReleased.from_result(result, self.identity)
)
except BaseException as exc:
if isinstance(exc, Exception):
self.logger.exception("Job %s raised an exception", job.id)
else:
self.logger.error(
"Job %s was aborted due to %s", job.id, exc.__class__.__name__
)
result = JobResult.from_job(
job,
JobOutcome.error,
exception=exc,
)
await self.data_store.release_job(
self.identity,
job.task_id,
result,
)
await self.event_broker.publish(
JobReleased.from_result(result, self.identity)
)
if not isinstance(exc, Exception):
raise
else:
self.logger.info("Job %s completed successfully", job.id)
result = JobResult.from_job(
job,
JobOutcome.success,
return_value=retval,
)
await self.data_store.release_job(self.identity, job.task_id, result)
await self.event_broker.publish(
JobReleased.from_result(result, self.identity)
)
finally:
current_job.reset(token)
finally:
self._running_jobs.remove(job.id) | APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/workers/async_.py | async_.py |
##########################
Frequently Asked Questions
##########################
Why doesn't the scheduler run my jobs?
======================================
This could be caused by a number of things. The two most common issues are:
#. Running the scheduler inside a uWSGI worker process while threads have not been
enabled (see the next section for this)
#. Starting a synchronous scheduler with
:meth:`~apscheduler.schedulers.sync.Scheduler.start_in_background` and then letting
the execution reach the end of the script
To demonstrate the latter case, a script like this will **not work**::
from apscheduler.schedulers.sync import Scheduler
    from apscheduler.triggers.cron import CronTrigger
def mytask():
print("hello")
scheduler = Scheduler()
scheduler.start_in_background()
scheduler.add_schedule(mytask, CronTrigger(hour=0))
The script above will **exit** right after calling
:meth:`~apscheduler.schedulers.sync.Scheduler.add_schedule`, so the scheduler will not have a
chance to run the scheduled task.
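One way to fix this is to keep the main thread alive by running the scheduler in the
foreground instead. A minimal sketch, assuming the synchronous scheduler exposes a
blocking ``run_until_stopped()`` method (mirroring the worker API)::
    from apscheduler.schedulers.sync import Scheduler
    from apscheduler.triggers.cron import CronTrigger
    def mytask():
        print("hello")
    scheduler = Scheduler()
    scheduler.add_schedule(mytask, CronTrigger(hour=0))
    # Block here so the scheduler has a chance to process the schedule
    scheduler.run_until_stopped()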
If you're having any other issue, then enabling debug logging as instructed in the
:ref:`troubleshooting` section should shed some light into the problem.
Why am I getting a ValueError?
==============================
If you're receiving an error like the following::
ValueError: This Job cannot be serialized since the reference to its callable (<bound method xxxxxxxx.on_crn_field_submission
of <__main__.xxxxxxx object at xxxxxxxxxxxxx>>) could not be determined. Consider giving a textual reference (module:function
name) instead.
This means that the function you are attempting to schedule has one of the following
problems:
* It is a lambda function (e.g. ``lambda x: x + 1``)
* It is a bound method (function tied to a particular instance of some class)
* It is a nested function (function inside another function)
* You are trying to schedule a function that is not tied to any actual module (such as a
function defined in the REPL, hence ``__main__`` as the module name)
In these cases, it is impossible for the scheduler to determine a "lookup path" to find
that specific function instance in situations where, for example, the scheduler process
is restarted, or a process pool worker is being sent the related job object.
Common workarounds for these problems include:
* Converting a lambda to a regular function
* Moving a nested function to the module level or to class level as either a class
method or a static method
* In case of a bound method, passing the unbound version (``YourClass.method_name``) as
the target function to ``add_job()`` with the class instance as the first argument (so
it gets passed as the ``self`` argument)
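As a sketch of the last workaround (the class, the method and the ``args`` keyword usage
are illustrative here)::
    class Processor:
        def process(self, batch_size):
            ...
    processor = Processor()
    # Broken: a bound method cannot be referenced by a persistent data store
    # scheduler.add_job(processor.process, args=[100])
    # Works: pass the unbound function and give the instance as the first argument
    scheduler.add_job(Processor.process, args=[processor, 100])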
Is there a graphical user interface for APScheduler?
====================================================
No graphical interface is provided by the library itself. However, there are some third
party implementations, for which the APScheduler developers are not responsible. Here is
a potentially incomplete list:
* django_apscheduler_
* apschedulerweb_
* `Nextdoor scheduler`_
.. warning:: As of this writing, these third party offerings have not been updated to
work with APScheduler 4.
.. _django_apscheduler: https://pypi.org/project/django-apscheduler/
.. _Flask-APScheduler: https://pypi.org/project/flask-apscheduler/
.. _pyramid_scheduler: https://github.com/cadithealth/pyramid_scheduler
.. _aiohttp: https://pypi.org/project/aiohttp/
.. _apschedulerweb: https://github.com/marwinxxii/apschedulerweb
.. _Nextdoor scheduler: https://github.com/Nextdoor/ndscheduler
| APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/docs/faq.rst | faq.rst |
##########
User guide
##########
Installation
============
The preferred installation method is by using `pip <http://pypi.python.org/pypi/pip/>`_::
$ pip install apscheduler
If you don't have pip installed, you need to
`install that first <https://pip.pypa.io/en/stable/installation/>`_.
Code examples
=============
The source distribution contains the :file:`examples` directory where you can find many
working examples for using APScheduler in different ways. The examples can also be
`browsed online <https://github.com/agronholm/apscheduler/tree/master/examples/?at=master>`_.
Introduction
============
The core concept of APScheduler is to give the user the ability to queue Python code to
be executed, either as soon as possible, later at a given time, or on a recurring
schedule. To make this happen, APScheduler has two types of components: *schedulers* and
*workers*.
A scheduler is the user-facing interface of the system. When running, it asks its
associated *data store* for *schedules* due to be run. For each such schedule, it then
uses the schedule's associated *trigger* to calculate run times up to the present. For
each run time, the scheduler creates a *job* in the data store, containing the
designated run time and the identifier of the schedule it was derived from.
A worker asks the data store for jobs, and then starts running those jobs. If the data
store signals that it has new jobs, the worker will try to acquire those jobs if it is
capable of accommodating more jobs. When a worker completes a job, it will then also ask
the data store for as many more jobs as it can handle.
By default, each scheduler starts an internal worker to simplify use, but in more
complex use cases you may wish to run them in separate processes, or even on separate
nodes. For this, you'll need both a persistent data store and an *event broker*, shared
by both the scheduler(s) and worker(s). For more information, see the section below on
running schedulers and workers separately.
Basic concepts / glossary
=========================
These are the basic components and concepts of APScheduler which will be referenced
later in this guide.
A *task* encapsulates a Python function and a number of configuration parameters. They
are often implicitly defined as a side effect of the user creating a new schedule
against a function, but can also be explicitly defined beforehand (**TODO**: implement
this!).
A *trigger* contains the logic and state used to calculate when a scheduled task should
be run.
A *schedule* combines a task with a trigger, plus a number of configuration parameters.
A *job* is a request for a task to be run. It can be created automatically from a
schedule when a scheduler processes it, or created directly by the user when they
request a task to be run.
A *data store* is used to store *schedules* and *jobs*, and to keep track of tasks.
A *scheduler* fetches schedules due for their next runs from its associated data store
and then creates new jobs accordingly.
A *worker* fetches jobs from its data store, runs them and pushes the results back to
the data store.
An *event broker* delivers published events to all interested parties. It facilitates
the cooperation between schedulers and workers by notifying them of new or updated
schedules or jobs.
Scheduling tasks
================
To create a schedule for running a task, you need, at the minimum:
* A *callable* to be run
* A *trigger*
.. note:: Scheduling lambdas or nested functions is currently not possible. This will be
fixed before the final release.
The callable can be a function or method, lambda or even an instance of a class that
contains the ``__call__()`` method. With the default (memory based) data store, any
callable can be used as a task callable. Persistent data stores (more on those below)
place some restrictions on the kinds of callables that can be used, because they cannot store
the callable directly but instead need to be able to locate it with a *reference*.
The trigger determines the scheduling logic for your schedule. In other words, it is
used to calculate the datetimes on which the task will be run. APScheduler comes with a
number of built-in trigger classes:
* :class:`~apscheduler.triggers.date.DateTrigger`:
use when you want to run the task just once at a certain point of time
* :class:`~apscheduler.triggers.interval.IntervalTrigger`:
use when you want to run the task at fixed intervals of time
* :class:`~apscheduler.triggers.cron.CronTrigger`:
use when you want to run the task periodically at certain time(s) of day
* :class:`~apscheduler.triggers.calendarinterval.CalendarIntervalTrigger`:
use when you want to run the task on calendar-based intervals, at a specific time of
day
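For example, a minimal schedule that runs a function every hour could look like this
(a sketch; the blocking ``run_until_stopped()`` call is assumed to mirror the worker
API)::
    from apscheduler.schedulers.sync import Scheduler
    from apscheduler.triggers.interval import IntervalTrigger
    def tick():
        print("Hello, world!")
    scheduler = Scheduler()
    scheduler.add_schedule(tick, IntervalTrigger(hours=1))
    scheduler.run_until_stopped()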
Combining multiple triggers
---------------------------
Occasionally, you may find yourself in a situation where your scheduling needs are too
complex to be handled with any of the built-in triggers directly.
One example of such a need would be when you want the task to run at 10:00 from Monday
to Friday, but also at 11:00 from Saturday to Sunday.
A single :class:`~apscheduler.triggers.cron.CronTrigger` would not be able to handle
this case, but an :class:`~apscheduler.triggers.combining.OrTrigger` containing two cron
triggers can::
from apscheduler.triggers.combining import OrTrigger
from apscheduler.triggers.cron import CronTrigger
trigger = OrTrigger(
CronTrigger(day_of_week="mon-fri", hour=10),
CronTrigger(day_of_week="sat-sun", hour=11),
)
On the first run, :class:`~apscheduler.triggers.combining.OrTrigger` generates the next
run times from both cron triggers and saves them internally. It then returns the
earliest one. On the next run, it generates a new run time from the trigger that
produced the earliest run time on the previous run, and then again returns the earliest
of the two run times. This goes on until all the triggers have been exhausted, if ever.
Another example would be a case where you want the task to be run every 2 months at
10:00, but not on weekends (Saturday or Sunday)::
from apscheduler.triggers.calendarinterval import CalendarIntervalTrigger
from apscheduler.triggers.combining import AndTrigger
from apscheduler.triggers.cron import CronTrigger
trigger = AndTrigger(
CalendarIntervalTrigger(months=2, hour=10),
CronTrigger(day_of_week="mon-fri", hour=10),
)
On the first run, :class:`~apscheduler.triggers.combining.AndTrigger` generates the next
run times from both the
:class:`~apscheduler.triggers.calendarinterval.CalendarIntervalTrigger` and
:class:`~apscheduler.triggers.cron.CronTrigger`. If the run times coincide, it will
return that run time. Otherwise, it will calculate a new run time from the trigger that
produced the earliest run time. It will keep doing this until a match is found, one of
the triggers has been exhausted or the maximum number of iterations (1000 by default) is
reached.
If this trigger is created on 2022-06-07 at 09:00:00, its first run times would be:
* 2022-06-07 10:00:00
* 2022-10-07 10:00:00
* 2022-12-07 10:00:00
Notably, 2022-08-07 is skipped because it falls on a Sunday.
Running tasks without scheduling
--------------------------------
In some cases, you want to run tasks directly, without involving schedules:
* You're only interested in using the scheduler system as a job queue
* You're interested in the job's return value
To queue a job and wait for its completion and get the result, the easiest way is to
use :meth:`~apscheduler.schedulers.sync.Scheduler.run_job`. If you prefer to just launch
a job and not wait for its result, use
:meth:`~apscheduler.schedulers.sync.Scheduler.add_job` instead. If you want to get the
results later, you can then call
:meth:`~apscheduler.schedulers.sync.Scheduler.get_job_result` with the job ID you got
from :meth:`~apscheduler.schedulers.sync.Scheduler.add_job`.
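A minimal sketch of both approaches (the task function is illustrative, and the
scheduler is assumed to be usable as a context manager that starts it in the background
and to accept positional arguments via the ``args`` keyword)::
    from apscheduler.schedulers.sync import Scheduler
    def add(x, y):
        return x + y
    with Scheduler() as scheduler:
        # Queue the job and wait for it to finish, getting the return value
        result = scheduler.run_job(add, args=[2, 3])
        # Alternatively, queue the job now and fetch its result later
        job_id = scheduler.add_job(add, args=[2, 3])
        result = scheduler.get_job_result(job_id)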
Removing schedules
------------------
To remove a previously added schedule, call
:meth:`~apscheduler.schedulers.sync.Scheduler.remove_schedule`. Pass the identifier of
the schedule you want to remove as an argument. This is the ID you got from
:meth:`~apscheduler.schedulers.sync.Scheduler.add_schedule`.
Note that removing a schedule does not cancel any jobs derived from it, but does prevent
further jobs from being created from that schedule.
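For example, using the identifier returned by
:meth:`~apscheduler.schedulers.sync.Scheduler.add_schedule` (the task and trigger are
illustrative)::
    schedule_id = scheduler.add_schedule(mytask, IntervalTrigger(minutes=15))
    ...
    scheduler.remove_schedule(schedule_id)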
Limiting the number of concurrently executing instances of a job
----------------------------------------------------------------
It is possible to control the maximum number of concurrently running jobs for a
particular task. By default, only one job is allowed to be run for every task.
This means that if the job is about to be run but there is another job for the same task
still running, the later job is terminated with the outcome of
:data:`~apscheduler.JobOutcome.missed_start_deadline`.
To allow more jobs to be concurrently running for a task, pass the desired maximum
number as the ``max_instances`` keyword argument to
:meth:`~apscheduler.schedulers.sync.Scheduler.add_schedule`.
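For example, to allow up to three concurrently running jobs for the same task (the task
and trigger are illustrative)::
    scheduler.add_schedule(fetch_data, IntervalTrigger(seconds=10), max_instances=3)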
Controlling how much a job can be started late
----------------------------------------------
Some tasks are time sensitive, and should not be run at all if they cannot be started on
time (like, for example, if the worker(s) were down while they were supposed to be
running the scheduled jobs). You can control this time limit with the
``misfire_grace_time`` option passed to
:meth:`~apscheduler.schedulers.sync.Scheduler.add_schedule`. A worker that acquires the
job then checks if the current time is later than the deadline
(run time + misfire grace time) and if it is, it skips the execution of the job and
releases it with the outcome of :data:`~apscheduler.JobOutcome.missed_start_deadline`.
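For example, to skip the job entirely if it cannot be started within ten minutes of its
designated run time (the value is given in seconds here, assuming it is converted the
same way as other time interval options; the task is illustrative)::
    scheduler.add_schedule(generate_report, CronTrigger(hour=7), misfire_grace_time=600)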
Controlling how jobs are queued from schedules
----------------------------------------------
In most cases, when a scheduler processes a schedule, it queues a new job using the
run time currently marked for the schedule. Then it updates the next run time using the
schedule's trigger and releases the schedule back to the data store. But sometimes a
situation occurs where the schedule did not get processed often or quickly enough, and
one or more next run times produced by the trigger are actually in the past.
In a situation like that, the scheduler needs to decide what to do: queue a separate
job for every run time produced, or *coalesce* them all, effectively kicking off just
a single job. To control this, pass the ``coalesce`` argument to
:meth:`~apscheduler.schedulers.sync.Scheduler.add_schedule`.
The possible values are:
* :data:`~apscheduler.CoalescePolicy.latest`: queue exactly one job, using the
**latest** run time as the designated run time
* :data:`~apscheduler.CoalescePolicy.earliest`: queue exactly one job, using the
**earliest** run time as the designated run time
* :data:`~apscheduler.CoalescePolicy.all`: queue one job for **each** of the calculated
run times
The biggest difference between the first two options is how the designated run time,
and by extension the starting deadline, is selected for the job. With the first option,
the job is less likely to be skipped due to being started late, since the latest of all
the collected run times is used for the deadline calculation. As explained in the
previous section, the starting deadline (run time plus the *misfire grace time*)
determines whether the newly queued job is skipped for starting too late.
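For example (a sketch; ``generate_report`` is a placeholder task and the trigger is
illustrative)::

    from apscheduler import CoalescePolicy
    from apscheduler.triggers.cron import CronTrigger

    # If several run times have accumulated, queue only one job, using the earliest
    # of the accumulated run times as its designated run time
    scheduler.add_schedule(
        generate_report,
        CronTrigger(hour="*"),
        coalesce=CoalescePolicy.earliest,
    )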
Context variables
=================
Schedulers and workers provide certain `context variables`_ available to the tasks being
run:
* The current scheduler: :data:`~apscheduler.current_scheduler`
* The current worker: :data:`~apscheduler.current_worker`
* Information about the job being currently run: :data:`~apscheduler.current_job`
Here's an example::
from apscheduler import current_job
def my_task_function():
job_info = current_job.get()
print(
f"This is job {job_info.id} and was spawned from schedule "
f"{job_info.schedule_id}"
)
.. _context variables: https://docs.python.org/3/library/contextvars.html
.. _scheduler-events:
Subscribing to events
=====================
Schedulers and workers have the ability to notify listeners when some event occurs in
the scheduler system. Examples of such events would be schedulers or workers starting up
or shutting down, or schedules or jobs being created or removed from the data store.
To listen to events, you need a callable that takes a single positional argument which
is the event object. Then, you need to decide which events you're interested in:
.. tabs::
.. code-tab:: python Synchronous
from apscheduler import Event, JobAcquired, JobReleased
def listener(event: Event) -> None:
print(f"Received {event.__class__.__name__}")
scheduler.events.subscribe(listener, {JobAcquired, JobReleased})
.. code-tab:: python Asynchronous
from apscheduler import Event, JobAcquired, JobReleased
async def listener(event: Event) -> None:
print(f"Received {event.__class__.__name__}")
scheduler.events.subscribe(listener, {JobAcquired, JobReleased})
This example subscribes to the :class:`~apscheduler.JobAcquired` and
:class:`~apscheduler.JobReleased` event types. The callback will receive an event of
either type and print the name of the received event's class.
Asynchronous schedulers and workers support both synchronous and asynchronous callbacks,
but their synchronous counterparts only support synchronous callbacks.
When **distributed** event brokers (that is, ones other than the default one) are used,
events other than those relating to the life cycles of schedulers and workers will be
sent to all schedulers and workers connected to that event broker.
Deployment
==========
Using persistent data stores
----------------------------
The default data store, :class:`~apscheduler.datastores.memory.MemoryDataStore`, stores
data only in memory, so all the schedules and jobs added to it will be lost when the
process exits or crashes.
When you need your schedules and jobs to survive the application shutting down, you need
to use a *persistent data store*. Such data stores do have additional considerations,
compared to the memory data store:
* The task callable cannot be a lambda or a nested function
* Task arguments must be *serializable*
* You must either trust the data store, or use an alternate *serializer*
* A *conflict policy* and an *explicit identifier* must be defined for schedules that
are added at application startup
These requirements warrant some explanation. Persisting data means saving it
externally, either to a file or to a database server, so all the objects involved are
converted to bytestrings. This process is called *serialization*, and it is the reason
why the task callable must be a regular, importable function (lambdas and nested
functions cannot be referenced or serialized reliably) and why the task arguments must
be serializable. By default, serialization is done using :mod:`pickle`, which offers
the best compatibility but is notorious for being vulnerable to simple injection
attacks. This brings us to the next point: if you cannot be sure that nobody can
maliciously alter the externally stored serialized data, it is best to use another
serializer. The built-in alternatives are:
* :class:`~apscheduler.serializers.cbor.CBORSerializer`
* :class:`~apscheduler.serializers.json.JSONSerializer`
The former requires the cbor2_ library, but supports a wider variety of types natively.
The latter has no dependencies but has very limited support for different types.
The final point relates to situations where you're essentially adding the same schedule
to the data store over and over again. If you don't specify a static identifier for
the schedules added at the start of the application, you will end up with an increasing
number of redundant schedules doing the same thing, which is probably not what you want.
To that end, you will need to come up with some identifying name which will ensure that
the same schedule will not be added over and over again (as data stores are required to
enforce the uniqueness of schedule identifiers). You'll also need to decide what to do
if the schedule already exists in the data store (that is, when the application is
started the second time) by passing the ``conflict_policy`` argument. Usually you want
the :data:`~apscheduler.ConflictPolicy.replace` option, which replaces the existing
schedule with the new one.
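Putting these pieces together, a persistent setup might look roughly like this (a
hedged sketch: the ``SQLAlchemyDataStore`` class and its ``serializer`` argument, the
``id`` keyword of ``add_schedule`` and the context manager usage are assumptions here;
see the examples mentioned below for working code)::

    from sqlalchemy import create_engine

    from apscheduler import ConflictPolicy
    from apscheduler.datastores.sqlalchemy import SQLAlchemyDataStore
    from apscheduler.schedulers.sync import Scheduler
    from apscheduler.serializers.cbor import CBORSerializer
    from apscheduler.triggers.cron import CronTrigger

    def nightly_cleanup():  # a module-level function; not a lambda or nested function
        ...

    # Placeholder database URL; the serializer= keyword is an assumption
    engine = create_engine("postgresql+psycopg2://user:pass@localhost/mydb")
    data_store = SQLAlchemyDataStore(engine, serializer=CBORSerializer())

    with Scheduler(data_store) as scheduler:
        scheduler.add_schedule(
            nightly_cleanup,
            CronTrigger(hour=3),
            id="nightly-cleanup",  # static identifier (keyword name assumed)
            conflict_policy=ConflictPolicy.replace,
        )
        scheduler.run_until_stopped()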
.. seealso:: You can find practical examples of persistent data stores in the
:file:`examples/standalone` directory (``async_postgres.py`` and
``async_mysql.py``).
.. _cbor2: https://pypi.org/project/cbor2/
Using multiple schedulers
-------------------------
There are several situations in which you would want to run several schedulers against
the same data store at once:
* Running a server application (usually a web app) with multiple workers
* You need fault tolerance (scheduling will continue even if a node or process running
a scheduler goes down)
When you have multiple schedulers (or workers; see the next section) running at once,
they need to be able to coordinate their efforts so that the schedules don't get
processed more than once and the schedulers know when to wake up even if another
scheduler added the next due schedule to the data store. To this end, a shared
*event broker* must be configured.
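Conceptually, every scheduler process then runs the same code against the same shared
data store and event broker (a sketch; construction of the two is elided, and passing
the event broker as the second argument mirrors the worker example below, which is an
assumption for the scheduler)::

    from apscheduler.schedulers.sync import Scheduler

    data_store = ...    # the shared, persistent data store
    event_broker = ...  # the shared event broker

    # Run this same code in every scheduler process or node
    scheduler = Scheduler(data_store, event_broker)
    scheduler.run_until_stopped()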
.. seealso:: You can find practical examples of data store sharing in the
:file:`examples/web` directory.
Running schedulers and workers separately
-----------------------------------------
Some deployment scenarios may warrant running workers separately from the schedulers.
For example, if you want to set up a scalable worker pool, you can run just the workers
in that pool and the schedulers elsewhere without the internal workers. To prevent the
scheduler from starting an internal worker, you need to pass it the
``start_worker=False`` option.
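For example (a sketch; the data store and event broker construction is elided, and
passing ``start_worker=False`` to the constructor is an assumption about where the
option goes)::

    from apscheduler.schedulers.sync import Scheduler

    data_store = ...
    event_broker = ...

    # This scheduler only processes schedules; job execution is left to
    # externally running workers
    scheduler = Scheduler(data_store, event_broker, start_worker=False)
    scheduler.run_until_stopped()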
Starting a worker without a scheduler looks very similar to the procedure to start a
scheduler:
.. tabs::
.. code-tab:: python Synchronous
from apscheduler.workers.sync import Worker
data_store = ...
event_broker = ...
worker = Worker(data_store, event_broker)
worker.run_until_stopped()
.. code-tab:: python asyncio
import asyncio
from apscheduler.workers.async_ import AsyncWorker
async def main():
data_store = ...
event_broker = ...
async with AsyncWorker(data_store, event_broker) as worker:
await worker.wait_until_stopped()
asyncio.run(main())
There is one significant matter to take into consideration if you do this. The scheduler
object, usually available from :data:`~apscheduler.current_scheduler`, will not be set
since there is no scheduler running in the current thread/task.
.. seealso:: A practical example of separate schedulers and workers can be found in the
:file:`examples/separate_worker` directory.
.. _troubleshooting:
Troubleshooting
===============
If something isn't working as expected, it will be helpful to increase the logging level
of the ``apscheduler`` logger to the ``DEBUG`` level.
If you do not yet have logging enabled in the first place, you can do this::
import logging
logging.basicConfig()
logging.getLogger('apscheduler').setLevel(logging.DEBUG)
This should provide lots of useful information about what's going on inside the
scheduler and/or worker.
Also make sure that you check the :doc:`faq` section to see if your problem already has
a solution.
Reporting bugs
==============
A `bug tracker <https://github.com/agronholm/apscheduler/issues>`_ is provided by
GitHub.
Getting help
============
If you have problems or other questions, you can either:
* Ask in the `apscheduler <https://gitter.im/apscheduler/Lobby>`_ room on Gitter
* Post a question on `GitHub discussions`_, or
* Post a question on StackOverflow_ and add the ``apscheduler`` tag
.. _GitHub discussions: https://github.com/agronholm/apscheduler/discussions/categories/q-a
.. _StackOverflow: http://stackoverflow.com/questions/tagged/apscheduler
| APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/docs/userguide.rst | userguide.rst |
Version history
===============
To find out how to migrate your application from a previous version of
APScheduler, see the :doc:`migration section <migration>`.
**4.0.0a1**
This was a major rewrite/redesign of most parts of the project. See the
:doc:`migration section <migration>` for details.
.. warning:: The v4.0 series is provided as a **pre-release** and may change in a
backwards incompatible fashion without any migration pathway, so do NOT use this
release in production!
- Made persistent data stores shareable between multiple processes and nodes
- Enhanced data stores to be more resilient against temporary connectivity failures
- Refactored executors (now called *workers*) to pull jobs from the data store so they
can be run independently from schedulers
- Added full async support (:mod:`asyncio` and Trio_) via AnyIO_
- Added type annotations to the code base
- Added the ability to queue jobs directly without scheduling them
- Added alternative serializers (CBOR, JSON)
- Added the ``CalendarInterval`` trigger
- Added the ability to access the current scheduler (under certain circumstances),
current worker and the currently running job via context-local variables
- Added schedule level support for jitter
- Made triggers stateful
- Added threshold support for ``AndTrigger``
- Migrated from ``pytz`` time zones to standard library ``zoneinfo`` zones
- Allowed a wider range of tzinfo implementations to be used (though ``zoneinfo`` is
preferred)
- Changed ``IntervalTrigger`` to start immediately instead of first waiting for one
interval
- Changed ``CronTrigger`` to use Sunday as weekday number 0, as per the crontab standard
- Dropped support for Python 2.X, 3.5 and 3.6
- Dropped support for the Qt, Twisted, Tornado and Gevent schedulers
- Dropped support for the Redis, RethinkDB and Zookeeper job stores
.. _Trio: https://pypi.org/project/trio/
.. _AnyIO: https://github.com/agronholm/anyio
**3.9.1**
* Removed a leftover check for pytz ``localize()`` and ``normalize()`` methods
**3.9.0**
- Added support for PySide6 to the Qt scheduler
- No longer enforce pytz time zones (support for others is experimental in the 3.x series)
- Fixed compatibility with PyMongo 4
- Fixed pytz deprecation warnings
- Fixed RuntimeError when shutting down the scheduler from a scheduled job
**3.8.1**
- Allowed the use of tzlocal v4.0+ in addition to v2.*
**3.8.0**
- Allowed passing through keyword arguments to the underlying stdlib executors in the
thread/process pool executors (PR by Albert Xu)
**3.7.0**
- Dropped support for Python 3.4
- Added PySide2 support (PR by Abdulla Ibrahim)
- Pinned ``tzlocal`` to a version compatible with pytz
- Ensured that jitter is always non-negative to prevent triggers from firing more often than
intended
- Changed ``AsyncIOScheduler`` to obtain the event loop in ``start()`` instead of ``__init__()``,
to prevent situations where the scheduler won't run because it's using a different event loop
than the one currently running
- Made it possible to create weak references to ``Job`` instances
- Made the schedulers explicitly raise a descriptive ``TypeError`` when serialization is attempted
- Fixed Zookeeper job store using backslashes instead of forward slashes for paths
on Windows (PR by Laurel-rao)
- Fixed deprecation warnings on the MongoDB job store and increased the minimum PyMongo
version to 3.0
- Fixed ``BlockingScheduler`` and ``BackgroundScheduler`` shutdown hanging after the user has
erroneously tried to start it twice
- Fixed memory leak when coroutine jobs raise exceptions (due to reference cycles in tracebacks)
- Fixed inability to schedule wrapped functions with extra arguments when the wrapped function
cannot accept them but the wrapper can (original PR by Egor Malykh)
- Fixed potential ``where`` clause error in the SQLAlchemy job store when a subclass uses more than
one search condition
- Fixed a problem where bound methods added as jobs via textual references were called with an
unwanted extra ``self`` argument (PR by Pengjie Song)
- Fixed ``BrokenPoolError`` in ``ProcessPoolExecutor`` so that it will automatically replace the
broken pool with a fresh instance
**3.6.3**
- Fixed Python 2.7 accidentally depending on the ``trollius`` package (regression from v3.6.2)
**3.6.2**
- Fixed handling of :func:`~functools.partial` wrapped coroutine functions in ``AsyncIOExecutor``
and ``TornadoExecutor`` (PR by shipmints)
**3.6.1**
- Fixed OverflowError on Qt scheduler when the wait time is very long
- Fixed methods inherited from base class could not be executed by processpool executor
(PR by Yang Jian)
**3.6.0**
- Adapted ``RedisJobStore`` to v3.0 of the ``redis`` library
- Adapted ``RethinkDBJobStore`` to v2.4 of the ``rethink`` library
- Fixed ``DeprecationWarnings`` about ``collections.abc`` on Python 3.7 (PR by Roman Levin)
**3.5.3**
- Fixed regression introduced in 3.5.2: Class methods were mistaken for instance methods and thus
were broken during serialization
- Fixed callable name detection for methods in old style classes
**3.5.2**
- Fixed scheduling of bound methods on persistent job stores (the workaround of scheduling
``YourClass.methodname`` along with an explicit ``self`` argument is no longer necessary as this
is now done automatically for you)
- Added the FAQ section to the docs
- Made ``BaseScheduler.start()`` raise a ``RuntimeError`` if running under uWSGI with threads
disabled
**3.5.1**
- Fixed ``OverflowError`` on Windows when the wait time is too long
- Fixed ``CronTrigger`` sometimes producing fire times beyond ``end_date`` when jitter is enabled
(thanks to gilbsgilbs for the tests)
- Fixed ISO 8601 UTC offset information being silently discarded from string formatted datetimes by
adding support for parsing them
**3.5.0**
- Added the ``engine_options`` option to ``SQLAlchemyJobStore``
- Added the ``jitter`` options to ``IntervalTrigger`` and ``CronTrigger`` (thanks to gilbsgilbs)
- Added combining triggers (``AndTrigger`` and ``OrTrigger``)
- Added better validation for the steps and ranges of different expressions in ``CronTrigger``
- Added support for named months (``jan`` – ``dec``) in ``CronTrigger`` month expressions
- Added support for creating a ``CronTrigger`` from a crontab expression
- Allowed spaces around commas in ``CronTrigger`` fields
- Fixed memory leak due to a cyclic reference when jobs raise exceptions
(thanks to gilbsgilbs for help on solving this)
- Fixed passing ``wait=True`` to ``AsyncIOScheduler.shutdown()`` (although it doesn't do much)
- Cancel all pending futures when ``AsyncIOExecutor`` is shut down
**3.4.0**
- Dropped support for Python 3.3
- Added the ability to specify the table schema for ``SQLAlchemyJobStore``
(thanks to Meir Tseitlin)
- Added a workaround for the ``ImportError`` when used with PyInstaller and the likes
(caused by the missing packaging metadata when APScheduler is packaged with these tools)
**3.3.1**
- Fixed Python 2.7 compatibility in ``TornadoExecutor``
**3.3.0**
- The asyncio and Tornado schedulers can now run jobs targeting coroutine functions
(requires Python 3.5; only native coroutines (``async def``) are supported)
- The Tornado scheduler now uses TornadoExecutor as its default executor (see above for why)
- Added ZooKeeper job store (thanks to Jose Ignacio Villar for the patch)
- Fixed job store failure (``get_due_jobs()``) causing the scheduler main loop to exit (it now
waits a configurable number of seconds before retrying)
- Fixed ``@scheduled_job`` not working when serialization is required (persistent job stores and
``ProcessPoolScheduler``)
- Improved import logic in ``ref_to_obj()`` to avoid errors in cases where traversing the path with
``getattr()`` would not work (thanks to Jarek Glowacki for the patch)
- Fixed CronTrigger's weekday position expressions failing on Python 3
- Fixed CronTrigger's range expressions sometimes allowing values outside the given range
**3.2.0**
- Added the ability to pause and unpause the scheduler
- Fixed pickling problems with persistent jobs when upgrading from 3.0.x
- Fixed AttributeError when importing apscheduler with setuptools < 11.0
- Fixed some events missing from ``apscheduler.events.__all__`` and
``apscheduler.events.EVENTS_ALL``
- Fixed wrong run time being set for date trigger when the timezone isn't the same as the local one
- Fixed builtin ``id()`` erroneously used in MongoDBJobStore's ``JobLookupError()``
- Fixed endless loop with CronTrigger that may occur when the computer's clock resolution is too
low (thanks to Jinping Bai for the patch)
**3.1.0**
- Added RethinkDB job store (contributed by Allen Sanabria)
- Added method chaining to the ``modify_job()``, ``reschedule_job()``, ``pause_job()`` and
``resume_job()`` methods in ``BaseScheduler`` and the corresponding methods in the ``Job`` class
- Added the EVENT_JOB_SUBMITTED event that indicates a job has been submitted to its executor.
- Added the EVENT_JOB_MAX_INSTANCES event that indicates a job's execution was skipped due to its
maximum number of concurrently running instances being reached
- Added the time zone to the repr() output of ``CronTrigger`` and ``IntervalTrigger``
- Fixed rare race condition on scheduler ``shutdown()``
- Dropped official support for CPython 2.6 and 3.2 and PyPy3
- Moved the connection logic in database backed job stores to the ``start()`` method
- Migrated to setuptools_scm for versioning
- Deprecated the various version related variables in the ``apscheduler`` module
(``apscheduler.version_info``, ``apscheduler.version``, ``apscheduler.release``,
``apscheduler.__version__``)
**3.0.6**
- Fixed bug in the cron trigger that produced off-by-1-hour datetimes when crossing the daylight
saving threshold (thanks to Tim Strazny for reporting)
**3.0.5**
- Fixed cron trigger always coalescing missed run times into a single run time
(contributed by Chao Liu)
- Fixed infinite loop in the cron trigger when an out-of-bounds value was given in an expression
- Fixed debug logging displaying the next wakeup time in the UTC timezone instead of the
scheduler's configured timezone
- Allowed unicode function references in Python 2
**3.0.4**
- Fixed memory leak in the base executor class (contributed by Stefan Nordhausen)
**3.0.3**
- Fixed compatibility with pymongo 3.0
**3.0.2**
- Fixed ValueError when the target callable has a default keyword argument that wasn't overridden
- Fixed wrong job sort order in some job stores
- Fixed exception when loading all jobs from the redis job store when there are paused jobs in it
- Fixed AttributeError when printing a job list when there were pending jobs
- Added setuptools as an explicit requirement in install requirements
**3.0.1**
- A wider variety of target callables can now be scheduled so that the jobs are still serializable
(static methods on Python 3.3+, unbound methods on all except Python 3.2)
- Attempting to serialize a non-serializable Job now raises a helpful exception during
serialization. Thanks to Jeremy Morgan for pointing this out.
- Fixed table creation with SQLAlchemyJobStore on MySQL/InnoDB
- Fixed start date getting set too far in the future with a timezone different from the local one
- Fixed _run_job_error() being called with the incorrect number of arguments in most executors
**3.0.0**
- Added support for timezones (special thanks to Curtis Vogt for help with this one)
- Split the old Scheduler class into BlockingScheduler and BackgroundScheduler and added
integration for asyncio (PEP 3156), Gevent, Tornado, Twisted and Qt event loops
- Overhauled the job store system for much better scalability
- Added the ability to modify, reschedule, pause and resume jobs
- Dropped the Shelve job store because it could not work with the new job store system
- Dropped the max_runs option and run counting of jobs since it could not be implemented reliably
- Adding jobs is now done exclusively through ``add_job()`` -- the shortcuts to triggers were
removed
- Added the ``end_date`` parameter to cron and interval triggers
- It is now possible to add a job directly to an executor without scheduling, by omitting the
trigger argument
- Replaced the thread pool with a pluggable executor system
- Added support for running jobs in subprocesses (via the ``processpool`` executor)
- Switched from nose to py.test for running unit tests
**2.1.0**
- Added Redis job store
- Added a "standalone" mode that runs the scheduler in the calling thread
- Fixed disk synchronization in ShelveJobStore
- Switched to PyPy 1.9 for PyPy compatibility testing
- Dropped Python 2.4 support
- Fixed SQLAlchemy 0.8 compatibility in SQLAlchemyJobStore
- Various documentation improvements
**2.0.3**
- The scheduler now closes the job store that is being removed, and all job stores on shutdown() by
default
- Added the ``last`` expression in the day field of CronTrigger (thanks rcaselli)
- Raise a TypeError when fields with invalid names are passed to CronTrigger (thanks Christy
O'Reilly)
- Fixed the persistent.py example by shutting down the scheduler on Ctrl+C
- Added PyPy 1.8 and CPython 3.3 to the test suite
- Dropped PyPy 1.4 - 1.5 and CPython 3.1 from the test suite
- Updated setup.cfg for compatibility with distutils2/packaging
- Examples, documentation sources and unit tests are now packaged in the source distribution
**2.0.2**
- Removed the unique constraint from the "name" column in the SQLAlchemy job store
- Fixed output from Scheduler.print_jobs() which did not previously output a line ending at the end
**2.0.1**
- Fixed cron style jobs getting wrong default values
**2.0.0**
- Added configurable job stores with several persistent back-ends (shelve, SQLAlchemy and MongoDB)
- Added the possibility to listen for job events (execution, error, misfire, finish) on a scheduler
- Added an optional start time for cron-style jobs
- Added optional job execution coalescing for situations where several executions of the job are
due
- Added an option to limit the maximum number of concurrently executing instances of the job
- Allowed configuration of misfire grace times on a per-job basis
- Allowed jobs to be explicitly named
- All triggers now accept dates in string form (YYYY-mm-dd HH:MM:SS)
- Jobs are now run in a thread pool; you can either supply your own PEP 3148 compliant thread pool
or let APScheduler create its own
- Maximum run count can be configured for all jobs, not just those using interval-based scheduling
- Fixed a v1.x design flaw that caused jobs to be executed twice when the scheduler thread was
woken up while still within the allowable range of their previous execution time (issues #5, #7)
- Changed defaults for cron-style jobs to be more intuitive -- it will now default to all
minimum values for fields lower than the least significant explicitly defined field
**1.3.1**
- Fixed time difference calculation to take into account shifts to and from daylight saving time
**1.3.0**
- Added __repr__() implementations to expressions, fields, triggers, and jobs to help with
debugging
- Added the dump_jobs method on Scheduler, which gives a helpful listing of all jobs scheduled on
it
- Fixed positional weekday (3th fri etc.) expressions not working except in some edge cases
(fixes #2)
- Removed autogenerated API documentation for modules which are not part of the public API, as it
might confuse some users
.. Note:: Positional weekdays are now used with the **day** field, not
**weekday**.
**1.2.1**
- Fixed regression: add_cron_job() in Scheduler was creating a CronTrigger with the wrong
parameters (fixes #1, #3)
- Fixed: if the scheduler is restarted, clear the "stopped" flag to allow jobs to be scheduled
again
**1.2.0**
- Added the ``week`` option for cron schedules
- Added the ``daemonic`` configuration option
- Fixed a bug in cron expression lists that could cause valid firing times to be missed
- Fixed unscheduling bound methods via unschedule_func()
- Changed CronTrigger constructor argument names to match those in Scheduler
**1.01**
- Fixed a corner case where the combination of hour and day_of_week parameters would cause
incorrect timing for a cron trigger
| APScheduler | /APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/docs/versionhistory.rst | versionhistory.rst |