repo_name | repo_path | repo_head_hexsha | content | apis
---|---|---|---|---
zuhalcakir/simone | simone/person/management.py | 88e04e6a228570d7d2a4c8bbf683e4903eeb592b | #from django.dispatch import dispatcher
#def UserProfilePostInsert(sender, instance, signal, *args, **kwargs):
#"""
#Inserts a blank imap server entry (if necessary) and associates it with the user
#"""
#user = instance
#i = user.get_profile().imap_servers.create()
#user.get_profile().about = 'test'
#i.save()
#user.save_profile()
## we want this called after every user is inserted
#dispatcher.connect(UserProfilePostInsert, signal=signals.pre_save, sender=User)
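## Illustrative sketch only (not part of the original file): the modern equivalent of the
## dispatcher.connect() call above would register a post_save receiver; User,
## get_profile() and imap_servers are assumed from the commented code above.
#from django.db.models.signals import post_save
#from django.dispatch import receiver
#@receiver(post_save, sender=User)
#def create_blank_imap_server(sender, instance, created, **kwargs):
#    if created:
#        instance.get_profile().imap_servers.create()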
| [] |
cog-isa/htm-rl | watcher/fly.py | baf5b67a11283d37165bf6a29d6808a234d6d98c | from utils.drawer import Drawer
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("name", help="the name of the datafile")
parser.add_argument("--size", help="width,height")
args = parser.parse_args()
if args.size is None:
width, height = 1280, 720
else:
width, height = args.size.split(',')
drawer = Drawer('data/'+args.name, [int(width), int(height)])
while not drawer.window.should_close():
drawer.update()
# the main application loop
while not drawer.window.should_close() and not drawer.window.next and not drawer.window.previous:
drawer.process()
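# step to the next/previous record when the corresponding navigation flag was set on the window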
if drawer.window.next and drawer.current + 2 < len(drawer.data_base.keys()): drawer.current = drawer.current + 1
if drawer.window.previous and drawer.current > 0: drawer.current = drawer.current - 1
drawer.window.next = False
drawer.window.previous = False
drawer.window.terminate()
| [((6, 13, 6, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n')] |
ourobouros/aws-encryption-sdk-python | test/unit/test_structures.py | 1d0e40de7fef1b1131127a6f8626ef6a60739289 | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Unit test suite for aws_encryption_sdk.structures"""
import pytest
from aws_encryption_sdk.identifiers import Algorithm, ContentType, ObjectType, SerializationVersion
from aws_encryption_sdk.structures import DataKey, EncryptedDataKey, MasterKeyInfo, MessageHeader, RawDataKey
from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs
pytestmark = [pytest.mark.unit, pytest.mark.local]
VALID_KWARGS = {
MessageHeader: [
dict(
version=SerializationVersion.V1,
type=ObjectType.CUSTOMER_AE_DATA,
algorithm=Algorithm.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,
message_id=b"aosiejfoaiwej",
encryption_context={},
encrypted_data_keys=set([]),
content_type=ContentType.FRAMED_DATA,
content_aad_length=32456,
header_iv_length=32456,
frame_length=234567,
)
],
MasterKeyInfo: [
dict(provider_id="fawnofijawef", key_info="ajsnoiajerofi"),
dict(provider_id=b"fawnofijawef", key_info="ajsnoiajerofi"),
dict(provider_id="fawnofijawef", key_info=b"ajsnoiajerofi"),
dict(provider_id=b"fawnofijawef", key_info=b"ajsnoiajerofi"),
],
RawDataKey: [
dict(key_provider=MasterKeyInfo(provider_id="asjnoa", key_info=b"aosjfoaiwej"), data_key=b"aosijfoewaijf")
],
DataKey: [
dict(
key_provider=MasterKeyInfo(provider_id="asjnoa", key_info=b"aosjfoaiwej"),
data_key=b"oaijefoawiejf",
encrypted_data_key=b"aisofiawjef",
)
],
EncryptedDataKey: [
dict(
key_provider=MasterKeyInfo(provider_id="asjnoa", key_info=b"aosjfoaiwej"), encrypted_data_key=b"aisofiawjef"
)
],
}
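# Behaviour assumed from the helper names in unit_test_utils (not verified here):
# all_valid_kwargs expands VALID_KWARGS into individual (cls, kwargs) pairs, while
# all_invalid_kwargs derives keyword sets that should make each constructor raise TypeError.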
@pytest.mark.parametrize("cls, kwargs", all_valid_kwargs(VALID_KWARGS))
def test_attributes_valid_kwargs(cls, kwargs):
cls(**kwargs)
@pytest.mark.parametrize("cls, kwargs", all_invalid_kwargs(VALID_KWARGS))
def test_attributes_invalid_kwargs(cls, kwargs):
with pytest.raises(TypeError):
cls(**kwargs)
@pytest.mark.parametrize(
"kwargs, attribute, expected_value",
(
(dict(provider_id="asfoijwae", key_info=b"oaiejfoeiwja"), "provider_id", "asfoijwae"),
(dict(provider_id=b"asfoijwae", key_info=b"oaiejfoeiwja"), "provider_id", "asfoijwae"),
(dict(provider_id="asfoijwae", key_info="oaiejfoeiwja"), "key_info", b"oaiejfoeiwja"),
(dict(provider_id="asfoijwae", key_info=b"oaiejfoeiwja"), "key_info", b"oaiejfoeiwja"),
),
)
def test_master_key_info_convert(kwargs, attribute, expected_value):
test = MasterKeyInfo(**kwargs)
assert getattr(test, attribute) == expected_value
| [((84, 11, 84, 34), 'aws_encryption_sdk.structures.MasterKeyInfo', 'MasterKeyInfo', ({}, {}), '(**kwargs)', False, 'from aws_encryption_sdk.structures import DataKey, EncryptedDataKey, MasterKeyInfo, MessageHeader, RawDataKey\n'), ((70, 9, 70, 33), 'pytest.raises', 'pytest.raises', ({(70, 23, 70, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((46, 26, 46, 86), 'aws_encryption_sdk.structures.MasterKeyInfo', 'MasterKeyInfo', (), '', False, 'from aws_encryption_sdk.structures import DataKey, EncryptedDataKey, MasterKeyInfo, MessageHeader, RawDataKey\n'), ((50, 25, 50, 85), 'aws_encryption_sdk.structures.MasterKeyInfo', 'MasterKeyInfo', (), '', False, 'from aws_encryption_sdk.structures import DataKey, EncryptedDataKey, MasterKeyInfo, MessageHeader, RawDataKey\n'), ((57, 25, 57, 85), 'aws_encryption_sdk.structures.MasterKeyInfo', 'MasterKeyInfo', (), '', False, 'from aws_encryption_sdk.structures import DataKey, EncryptedDataKey, MasterKeyInfo, MessageHeader, RawDataKey\n')] |
CristianLazoQuispe/Datathon-Interbank-2020 | codes/utils/mygraph.py | 54f5d11fe83eb5a8ea8284be13d96e9e12978354 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
path_results = '../results/images/'
# receives a dataset with a binary target column and plots the distribution of its values
def graph_target(data,name="target",figsize=(6,4),title_name=None,color_text="white",save=False,name_file='target_distribution'):
plt.figure(figsize=figsize)
total = float(len(data)) # one person per row
title_name = "Target distribution"+" of "+str(int(total))+" users" if title_name is None else title_name+" of "+str(int(total))+" users"
ax = sns.countplot(x=name, data=data) # for Seaborn version 0.7 and more
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x()+p.get_width()/2.,
height/3,
'{:.2f}%\n{:d}'.format(100*height/total,height),
ha="center",color=color_text,fontweight='bold')#fontsize=10
plt.title(title_name)
plt.show()
if save:
figure = ax.get_figure()
figure.savefig(path_results+name_file+'.png',dpi=400, bbox_inches = 'tight')
# plot histograms of train and test to understand the differences between them
def plot_comp_hist(data1,data2,l_range=[-np.inf,np.inf],labels=['x','y'],title='histogram',bins=20,alpha=0.5):
x = data1[(data1>=l_range[0])&(data1<l_range[1])]
y = data2[(data2>=l_range[0])&(data2<l_range[1])]
plt.hist([x, y],label=labels, bins = bins, alpha=alpha)
plt.legend(loc='upper right')
plt.title(title)
#rcc_train[(rcc_train.saldo>=0.2)&(rcc_train.saldo<3)].saldo.plot.hist(title="Fraud Transaction <3", alpha=0.5)
#rcc_train[(rcc_test.saldo>=0.2)&(rcc_test.saldo<3)].saldo.plot.hist(title="Fraud Tranascation <3", alpha=0.5) | [((10, 4, 10, 31), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((13, 9, 13, 41), 'seaborn.countplot', 'sns.countplot', (), '', True, 'import seaborn as sns\n'), ((20, 4, 20, 25), 'matplotlib.pyplot.title', 'plt.title', ({(20, 14, 20, 24): 'title_name'}, {}), '(title_name)', True, 'import matplotlib.pyplot as plt\n'), ((21, 4, 21, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((30, 4, 30, 59), 'matplotlib.pyplot.hist', 'plt.hist', (), '', True, 'import matplotlib.pyplot as plt\n'), ((31, 4, 31, 33), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((32, 4, 32, 20), 'matplotlib.pyplot.title', 'plt.title', ({(32, 14, 32, 19): 'title'}, {}), '(title)', True, 'import matplotlib.pyplot as plt\n')] |
td00/pretix | src/pretix/base/validators.py | e31bd7600c85598de135f2eb5012e2f33fdb1d11 | from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import ugettext_lazy as _
class BlacklistValidator:
blacklist = []
def __call__(self, value):
# Validation logic
if value in self.blacklist:
raise ValidationError(
_('This slug has an invalid value: %(value)s.'),
code='invalid',
params={'value': value},
)
@deconstructible
class EventSlugBlacklistValidator(BlacklistValidator):
blacklist = [
'download',
'healthcheck',
'locale',
'control',
'redirect',
'jsi18n',
'metrics',
'_global',
'__debug__',
'api',
'events',
]
@deconstructible
class OrganizerSlugBlacklistValidator(BlacklistValidator):
blacklist = [
'download',
'healthcheck',
'locale',
'control',
'pretixdroid',
'redirect',
'jsi18n',
'metrics',
'_global',
'__debug__',
'about',
'api',
]
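# Hypothetical usage sketch (not part of this module): an instance of such a validator is
# attached to a model or form field so that reserved slugs are rejected at validation time:
#
# slug = models.SlugField(validators=[EventSlugBlacklistValidator()])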
| [((14, 16, 14, 63), 'django.utils.translation.ugettext_lazy', '_', ({(14, 18, 14, 62): '"""This slug has an invalid value: %(value)s."""'}, {}), "('This slug has an invalid value: %(value)s.')", True, 'from django.utils.translation import ugettext_lazy as _\n')] |
gfdb/fortnitepy | fortnitepy/ext/commands/bot.py | 1cedbddee1f81c96fc60b586cd2c16398bc2d45f | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import logging
import inspect
import asyncio
import types
import sys
import importlib
import collections
import traceback
from typing import Any, List, Optional, Mapping, Set
from fortnitepy.client import Client
from fortnitepy.auth import Auth
from fortnitepy.typedefs import MaybeCoro, ListOrTuple
from ._types import _BaseCommand
from .errors import (ExtensionFailed, ExtensionMissingEntryPoint,
ExtensionNotLoaded, ExtensionAlreadyLoaded,
ExtensionNotFound, CheckFailure, CommandError,
CommandNotFound)
from .core import GroupMixin
from .cog import Cog
from .view import StringView
from .context import Context
from .help import HelpCommand, FortniteHelpCommand
from .typedefs import Message
log = logging.getLogger(__name__)
def _is_submodule(parent: str, child: str) -> bool:
return parent == child or child.startswith(parent + ".")
class _DefaultRepr:
def __repr__(self) -> str:
return '<default-help-command>'
_default = _DefaultRepr()
class Bot(GroupMixin, Client):
"""Represents a fortnite bot.
This class is a subclass of :class:`fortnitepy.Client` and as a result
anything that you can do with a :class:`fortnitepy.Client` you can do with
this bot.
This class also subclasses :class:`.GroupMixin` to provide the
functionality to manage commands.
Attributes
-----------
command_prefix
The command prefix is what the message content must contain initially
to have a command invoked. This prefix could either be a string to
indicate what the prefix should be, or a callable that takes in the bot
as its first parameter and :class:`fortnitepy.FriendMessage` or
:class:`fortnitepy.PartyMessage` as its second parameter and returns
the prefix. This is to facilitate "dynamic" command prefixes. This
callable can be either a regular function or a coroutine.
An empty string as the prefix always matches, enabling prefix-less
command invocation.
The command prefix could also be an iterable of strings indicating that
multiple checks for the prefix should be used and the first one to
match will be the invocation prefix. You can get this prefix via
:attr:`.Context.prefix`. To avoid confusion empty iterables are not
allowed.
.. note::
When passing multiple prefixes be careful to not pass a prefix
that matches a longer prefix occurring later in the sequence. For
example, if the command prefix is ``('!', '!?')`` the ``'!?'``
prefix will never be matched to any message as the previous one
matches messages starting with ``!?``. This is especially important
when passing an empty string, it should always be last as no prefix
after it will be matched.
case_insensitive: :class:`bool`
Whether the commands should be case insensitive. Defaults to ``False``.
This attribute does not carry over to groups. You must set it to every
group if you require group commands to be case insensitive as well.
description: :class:`str`
The content prefixed into the default help message.
help_command: Optional[:class:`.HelpCommand`]
The help command implementation to use. This can be dynamically
set at runtime. To remove the help command pass ``None``. For more
information on implementing a help command, see
:ref:`ext_commands_help_command`.
owner_id: Optional[:class:`int`]
The user ID that owns the bot. This is used by :meth:`.is_owner()`
and checks that call this method.
owner_ids: Optional[Collection[:class:`int`]]
The user IDs that own the bot. This is similar to `owner_id`.
For performance reasons it is recommended to use a :class:`set`
for the collection. You cannot set both `owner_id` and `owner_ids`.
This is used by :meth:`.is_owner()` and checks that call this method.
"""
def __init__(self, command_prefix: Any, auth: Auth, *,
help_command: Optional[HelpCommand] = _default,
description: Optional[str] = None,
**kwargs: Any) -> None:
kwargs['case_insensitive'] = kwargs.get('case_insensitive', False)
super().__init__(auth, **kwargs)
self.command_prefix = command_prefix
self.description = inspect.cleandoc(description) if description else ''
self.owner_id = kwargs.get('owner_id')
self.owner_ids = kwargs.get('owner_ids', set())
if self.owner_id and self.owner_ids:
raise TypeError('Both owner_id and owner_ids are set.')
if (self.owner_ids and not isinstance(self.owner_ids,
collections.abc.Collection)):
raise TypeError(
'owner_ids must be a collection not '
'{0.__class__!r}'.format(self.owner_ids)
)
self.__cogs = {}
self.__extensions = {}
self._checks = []
self._check_once = []
self._help_command = None
self._before_invoke = None
self._after_invoke = None
if help_command is _default:
self.help_command = FortniteHelpCommand()
else:
self.help_command = help_command
self.add_event_handler('friend_message', self.process_commands)
self.add_event_handler('party_message', self.process_commands)
def register_methods(self) -> None:
for _, obj in inspect.getmembers(self):
if isinstance(obj, _BaseCommand):
obj.instance = self
if obj.parent is None:
try:
self.add_command(obj)
except CommandError:
traceback.print_exc()
continue
super().register_methods()
async def close(self, *,
close_http: bool = True,
dispatch_close: bool = True) -> None:
if dispatch_close:
await asyncio.gather(
self.dispatch_and_wait_event('before_close'),
self.dispatch_and_wait_event('close'),
)
for extension in tuple(self.__extensions):
try:
self.unload_extension(extension)
except Exception:
pass
for cog in tuple(self.__cogs):
try:
self.remove_cog(cog)
except Exception:
pass
await self._close(
close_http=close_http,
dispatch_close=dispatch_close
)
def check(self, func: MaybeCoro) -> MaybeCoro:
r"""A decorator that adds a check globally to every command.
.. note::
This function can either be a regular function or a coroutine.
This function takes a single parameter, :class:`.Context`, and can
only raise exceptions inherited from :exc:`.CommandError`.
Example
-------
.. code-block:: python3
@bot.check
def global_check(ctx):
# Allows only party commands.
return ctx.party is not None
"""
self.add_check(func)
return func
def add_check(self, func: MaybeCoro, *,
call_once: bool = False) -> None:
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`.check`
and :meth:`.check_once`.
Parameters
----------
func
The function that was used as a global check.
call_once: :class:`bool`
If the function should only be called once per
:meth:`Command.invoke` call.
"""
if call_once:
self._check_once.append(func)
else:
self._checks.append(func)
def remove_check(self, func: MaybeCoro, *,
call_once: bool = False) -> None:
"""Removes a global check from the bot.
Parameters
----------
func
The function to remove from the global checks.
call_once: :class:`bool`
If the function was added with ``call_once=True`` in
the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
"""
list_ = self._check_once if call_once else self._checks
try:
list_.remove(func)
except ValueError:
pass
def check_once(self, func: MaybeCoro) -> MaybeCoro:
r"""A decorator that adds a "call once" global check to the bot.
Unlike regular global checks, this one is called only once
per :meth:`Command.invoke` call.
Regular global checks are called whenever a command is called
or :meth:`.Command.can_run` is called. This type of check
bypasses that and ensures that it's called only once, even inside
the default help command.
.. note::
This function can either be a regular function or a coroutine.
This function takes a single parameter, :class:`.Context`, and can
only raise exceptions inherited from :exc:`.CommandError`.
Example
-------
.. code-block:: python3
@bot.check_once
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func, call_once=True)
return func
async def can_run(self, ctx: Context, *,
call_once: bool = False) -> bool:
data = self._check_once if call_once else self._checks
if len(data) == 0:
return True
for func in data:
if asyncio.iscoroutinefunction(func):
res = await func(ctx)
else:
res = func(ctx)
if not res:
return False
return True
async def is_owner(self, user_id: str) -> bool:
"""|coro|
Checks if a user id is the owner of the bot.
Parameters
----------
user_id: :class:`str`
The user id to check for.
Returns
-------
:class:`bool`
Whether the user is the owner.
"""
if self.owner_id:
return user_id == self.owner_id
else:
return user_id in self.owner_ids
def before_invoke(self, coro: MaybeCoro) -> MaybeCoro:
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke`
hooks are only called if all checks and argument parsing
procedures pass without error. If any check or argument parsing
procedures fail then the hooks are not called.
Parameters
----------
coro
The coroutine to register as the pre-invoke hook.
Raises
------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The pre-invoke hook must be a coroutine.')
self._before_invoke = coro
return coro
def after_invoke(self, coro: MaybeCoro) -> MaybeCoro:
r"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
Similar to :meth:`~.Bot.before_invoke`\, this is not called unless
checks and argument parsing procedures succeed. This hook is,
however, **always** called regardless of the internal command
callback raising an error (i.e. :exc:`.CommandInvokeError`\).
This makes it ideal for clean-up scenarios.
Parameters
----------
coro:
The coroutine to register as the post-invoke hook.
Raises
------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The post-invoke hook must be a coroutine.')
self._after_invoke = coro
return coro
def add_cog(self, cog: Cog) -> None:
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
Parameters
----------
cog: :class:`.Cog`
The cog to register to the bot.
Raises
------
TypeError
The cog does not inherit from :class:`.Cog`.
CommandError
An error happened during loading.
"""
if not isinstance(cog, Cog):
raise TypeError('Cogs must derive from Cog.')
cog = cog._inject(self)
self.__cogs[cog.__cog_name__] = cog
def remove_cog(self, name: str) -> None:
"""Removes a cog from the bot.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then this method has no effect.
Parameters
----------
name: :class:`str`
The name of the cog to remove.
"""
cog = self.__cogs.pop(name, None)
if cog is None:
return
help_command = self.help_command
if help_command and help_command.cog is cog:
help_command.cog = None
cog._eject(self)
def get_cog(self, name: str) -> Optional[Cog]:
"""Gets the cog instance requested.
If the cog is not found, ``None`` is returned instead.
Parameters
-----------
name: :class:`str`
The name of the cog you are requesting.
This is equivalent to the name passed via keyword
argument in class creation or the class name if unspecified.
"""
return self.__cogs.get(name)
@property
def cogs(self) -> Mapping[str, Cog]:
"""Mapping[:class:`str`, :class:`Cog`]: A read-only mapping of cog
name to cog.
"""
return types.MappingProxyType(self.__cogs)
def _remove_module_references(self, name: str) -> None:
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.__cogs.copy().items():
if _is_submodule(name, cog.__module__):
self.remove_cog(cogname)
# remove all the commands from the module
for cmd in self.all_commands.copy().values():
if cmd.module is not None and _is_submodule(name, cmd.module):
if isinstance(cmd, GroupMixin):
cmd.recursively_remove_all_commands()
self.remove_command(cmd.name)
# remove all the listeners from the module
for event_list in self._events.copy().values():
remove = []
for index, event in enumerate(event_list):
if (event.__module__ is not None
and _is_submodule(name, event.__module__)):
remove.append(index)
for index in reversed(remove):
del event_list[index]
def _call_module_finalizers(self, lib: object, key: str) -> None:
try:
func = getattr(lib, 'cog_teardown')
except AttributeError:
pass
else:
try:
func(self)
except Exception:
pass
finally:
self.__extensions.pop(key, None)
sys.modules.pop(key, None)
name = lib.__name__
for module in list(sys.modules.keys()):
if _is_submodule(name, module):
del sys.modules[module]
def _load_from_module_spec(self, spec: types.ModuleType,
key: str) -> None:
# precondition: key not in self.__extensions
lib = importlib.util.module_from_spec(spec)
sys.modules[key] = lib
try:
spec.loader.exec_module(lib)
except Exception as e:
del sys.modules[key]
raise ExtensionFailed(key, e) from e
try:
setup = getattr(lib, 'extension_setup')
except AttributeError:
del sys.modules[key]
raise ExtensionMissingEntryPoint(key)
try:
setup(self)
except Exception as e:
del sys.modules[key]
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, key)
raise ExtensionFailed(key, e) from e
else:
self.__extensions[key] = lib
def load_extension(self, name: str) -> None:
"""Loads an extension.
An extension is a python module that contains commands, cogs, or
listeners.
An extension must have a global function, ``extension_setup`` defined
as the entry point on what to do when the extension is loaded. This
entry point must have a single argument, the ``bot``.
Parameters
----------
name: :class:`str`
The extension name to load. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
------
ExtensionNotFound
The extension could not be imported.
ExtensionAlreadyLoaded
The extension is already loaded.
ExtensionMissingEntryPoint
The extension does not have an extension_setup function.
ExtensionFailed
The extension or its setup function had an execution error.
"""
if name in self.__extensions:
raise ExtensionAlreadyLoaded(name)
spec = importlib.util.find_spec(name)
if spec is None:
raise ExtensionNotFound(name)
self._load_from_module_spec(spec, name)
def unload_extension(self, name: str) -> None:
"""Unloads an extension.
When the extension is unloaded, all commands, listeners, and cogs are
removed from the bot and the module is un-imported.
The extension can provide an optional global function,
``cog_teardown``, to do miscellaneous clean-up if necessary. This
function takes a single parameter, the ``bot``, similar to
``extension_setup`` from :meth:`~.Bot.load_extension`.
Parameters
------------
name: :class:`str`
The extension name to unload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
-------
ExtensionNotLoaded
The extension was not loaded.
"""
lib = self.__extensions.get(name)
if lib is None:
raise ExtensionNotLoaded(name)
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
def reload_extension(self, name: str) -> None:
"""Atomically reloads an extension.
This replaces the extension with the same extension, only refreshed.
This is equivalent to a :meth:`unload_extension` followed by
a :meth:`load_extension` except done in an atomic way. That is, if an
operation fails mid-reload then the bot will roll-back to the prior
working state.
Parameters
------------
name: :class:`str`
The extension name to reload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
-------
ExtensionNotLoaded
The extension was not loaded.
ExtensionNotFound
The extension could not be imported.
ExtensionMissingEntryPoint
The extension does not have an extension_setup function.
ExtensionFailed
The extension setup function had an execution error.
"""
lib = self.__extensions.get(name)
if lib is None:
raise ExtensionNotLoaded(name)
# get the previous module states from sys modules
modules = {
name: module
for name, module in sys.modules.items()
if _is_submodule(lib.__name__, name)
}
try:
# Unload and then load the module...
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
self.load_extension(name)
except Exception:
# if the load failed, the remnants should have been
# cleaned from the load_extension function call
# so let's load it from our old compiled library.
lib.extension_setup(self)
self.__extensions[name] = lib
# revert sys.modules back to normal and raise back to caller
sys.modules.update(modules)
raise
@property
def extensions(self) -> Mapping[str, types.ModuleType]:
"""Mapping[:class:`str`, :class:`py:types.ModuleType`]: A read-only
mapping of extension name to extension.
"""
return types.MappingProxyType(self.__extensions)
@property
def help_command(self) -> Optional[HelpCommand]:
return self._help_command
@help_command.setter
def help_command(self, value: Optional[HelpCommand]) -> None:
if value is not None:
if not isinstance(value, HelpCommand):
raise TypeError('help_command must be a subclass '
'of HelpCommand')
if self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = value
value._add_to_bot(self)
elif self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = None
else:
self._help_command = None
async def get_prefix(self, message: Message) -> Any:
"""|coro|
Retrieves the prefix the bot is listening to with the message as
a context.
Parameters
----------
message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
The message context to get the prefix of.
Returns
--------
Union[List[:class:`str`], :class:`str`]
A list of prefixes or a single prefix that the bot is
listening for.
""" # noqa
prefix = ret = self.command_prefix
if callable(prefix):
if asyncio.iscoroutinefunction(prefix):
ret = await prefix(self, message)
else:
ret = prefix(self, message)
if not isinstance(ret, str):
try:
ret = list(ret)
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.abc.Iterable):
raise
raise TypeError('command_prefix must be plain string, '
'iterable of strings, or callable '
'returning either of these, not '
'{}'.format(ret.__class__.__name__))
if not ret:
raise ValueError('Iterable command_prefix must contain at '
'least one prefix')
return ret
async def get_context(self, message: Message, *,
cls: Context = Context) -> Context:
r"""|coro|
Returns the invocation context from the message.
This is a more low-level counter-part for :meth:`.process_commands`
to allow users more fine grained control over the processing.
The returned context is not guaranteed to be a valid invocation
context, :attr:`.Context.valid` must be checked to make sure it is.
If the context is not valid then it is not a valid candidate to be
invoked under :meth:`~.Bot.invoke`.
Parameters
----------
message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
The message to get the invocation context from.
cls
The factory class that will be used to create the context.
By default, this is :class:`.Context`. Should a custom
class be provided, it must be similar enough to :class:`.Context`\'s
interface.
Returns
-------
:class:`.Context`
The invocation context. The type of this can change via the
``cls`` parameter.
""" # noqa
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
prefix = await self.get_prefix(message)
invoked_prefix = prefix
if isinstance(prefix, str):
if not view.skip_string(prefix):
return ctx
else:
try:
if message.content.startswith(tuple(prefix)):
for element in prefix:
if view.skip_string(element):
invoked_prefix = element
break
else:
invoked_prefix = None
else:
return ctx
except TypeError:
if not isinstance(prefix, list):
raise TypeError('get_prefix must return either a string '
'or a list of strings, not '
'{}'.format(prefix.__class__.__name__))
for value in prefix:
if not isinstance(value, str):
raise TypeError('Iterable command_prefix or list '
'returned from get_prefix must '
'contain only strings, not '
'{}'.format(value.__class__.__name__))
raise
invoker = view.get_word()
ctx.invoked_with = invoker
ctx.prefix = invoked_prefix
ctx.command = self.all_commands.get(invoker)
return ctx
def _print_error(self, ctx: Context, error: Exception) -> None:
print(
'Ignoring exception in command {}:'.format(ctx.command),
file=sys.stderr
)
traceback.print_exception(
type(error),
error,
error.__traceback__,
file=sys.stderr
)
async def wait_for_futures(self, futures: ListOrTuple, *,
check: Optional[callable] = None,
timeout: Optional[int] = None,
cancel: bool = False) -> None:
def _cancel_futs(pending_futures: Set[asyncio.Future]) -> None:
for p in pending_futures:
if not p.cancelled():
p.cancel()
pending = futures
while pending:
done, pending = await asyncio.wait(
pending,
return_when=asyncio.FIRST_COMPLETED,
timeout=timeout
)
# Set should only contain one value
for future in done:
if check is None or check(future):
if cancel:
_cancel_futs(pending)
return future
async def _wait_for_error_return(self, futures: List[asyncio.Future],
ctx: Context,
error: Exception) -> None:
def check(future):
return future.result() is False
ret = await self.wait_for_futures(futures, check=check)
if isinstance(ret, asyncio.Future):
self._print_error(ctx, error)
def dispatch_error(self, ctx: Context, error: Exception) -> None:
if self._event_has_handler('command_error'):
futures = self.dispatch_event('command_error', ctx, error)
asyncio.ensure_future(self._wait_for_error_return(
futures,
ctx,
error
))
else:
self._print_error(ctx, error)
async def invoke(self, ctx: Context) -> None:
"""|coro|
Invokes the command given under the invocation context and
handles all the internal event dispatch mechanisms.
Parameters
-----------
ctx: :class:`.Context`
The invocation context to invoke.
"""
if ctx.command is not None:
self.dispatch_event('command', ctx)
try:
if await self.can_run(ctx, call_once=True):
await ctx.command.invoke(ctx)
else:
raise CheckFailure('The global check once functions '
'failed.')
except CommandError as exc:
await ctx.command.dispatch_error(ctx, exc)
else:
self.dispatch_event('command_completion', ctx)
elif ctx.invoked_with:
exc = CommandNotFound('Command "{}" is not found'
''.format(ctx.invoked_with))
self.dispatch_error(ctx, exc)
async def process_commands(self, message: Message) -> None:
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called automatically when a new
message is received.
This is built using other low level tools, and is equivalent to a
call to :meth:`~.Bot.get_context` followed by a call to
:meth:`~.Bot.invoke`.
Parameters
-----------
message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
The message to process commands for.
""" # noqa
if message.author.id == self.user.id:
return
ctx = await self.get_context(message)
await self.invoke(ctx)
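# Illustrative subclass sketch (illustrative only, not from the original file): additional
# filtering can be layered on top of process_commands before commands are dispatched.
#
# class FilteredBot(Bot):
#     async def process_commands(self, message):
#         if message.author.id in BLOCKED_IDS:  # BLOCKED_IDS is a placeholder set
#             return
#         await super().process_commands(message)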
| [((53, 6, 53, 33), 'logging.getLogger', 'logging.getLogger', ({(53, 24, 53, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((167, 22, 167, 46), 'inspect.getmembers', 'inspect.getmembers', ({(167, 41, 167, 45): 'self'}, {}), '(self)', False, 'import inspect\n'), ((466, 15, 466, 50), 'types.MappingProxyType', 'types.MappingProxyType', ({(466, 38, 466, 49): 'self.__cogs'}, {}), '(self.__cogs)', False, 'import types\n'), ((514, 14, 514, 51), 'importlib.util.module_from_spec', 'importlib.util.module_from_spec', ({(514, 46, 514, 50): 'spec'}, {}), '(spec)', False, 'import importlib\n'), ((569, 15, 569, 45), 'importlib.util.find_spec', 'importlib.util.find_spec', ({(569, 40, 569, 44): 'name'}, {}), '(name)', False, 'import importlib\n'), ((664, 15, 664, 56), 'types.MappingProxyType', 'types.MappingProxyType', ({(664, 38, 664, 55): 'self.__extensions'}, {}), '(self.__extensions)', False, 'import types\n'), ((136, 27, 136, 56), 'inspect.cleandoc', 'inspect.cleandoc', ({(136, 44, 136, 55): 'description'}, {}), '(description)', False, 'import inspect\n'), ((305, 15, 305, 48), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', ({(305, 43, 305, 47): 'func'}, {}), '(func)', False, 'import asyncio\n'), ((361, 15, 361, 48), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', ({(361, 43, 361, 47): 'coro'}, {}), '(coro)', False, 'import asyncio\n'), ((394, 15, 394, 48), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', ({(394, 43, 394, 47): 'coro'}, {}), '(coro)', False, 'import asyncio\n'), ((505, 12, 505, 38), 'sys.modules.pop', 'sys.modules.pop', ({(505, 28, 505, 31): 'key', (505, 33, 505, 37): 'None'}, {}), '(key, None)', False, 'import sys\n'), ((706, 15, 706, 50), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', ({(706, 43, 706, 49): 'prefix'}, {}), '(prefix)', False, 'import asyncio\n'), ((507, 31, 507, 49), 'sys.modules.keys', 'sys.modules.keys', ({}, {}), '()', False, 'import sys\n'), ((639, 32, 639, 51), 'sys.modules.items', 'sys.modules.items', ({}, {}), '()', False, 'import sys\n'), ((656, 12, 656, 39), 'sys.modules.update', 'sys.modules.update', ({(656, 31, 656, 38): 'modules'}, {}), '(modules)', False, 'import sys\n'), ((829, 34, 833, 13), 'asyncio.wait', 'asyncio.wait', (), '', False, 'import asyncio\n'), ((175, 24, 175, 45), 'traceback.print_exc', 'traceback.print_exc', ({}, {}), '()', False, 'import traceback\n')] |
ChuanleiGuo/AlgorithmsPlayground | LeetCodeSolutions/python/64_Minimum_Path_Sum.py | 90b6287b742c8bfd3797540c408d679be2821a40 | class Solution(object):
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
m, n = len(grid), len(grid[0])
dp = [[0] * n for _ in range(m)]
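# dp[i][j] holds the minimal path sum from the top-left cell to cell (i, j)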
for i in range(m):
for j in range(n):
if i == 0 and j == 0:
dp[i][j] = grid[0][0]
elif i == 0:
dp[i][j] = grid[i][j] + dp[i][j - 1]
elif j == 0:
dp[i][j] = grid[i][j] + dp[i - 1][j]
else:
dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
return dp[m - 1][n - 1]
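# Worked example: for grid [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest path is
# 1 -> 3 -> 1 -> 1 -> 1, so Solution().minPathSum(grid) returns 7.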
| [] |
tankbusta/rescache | paths_win.py | 86ca7f3fb66e28a8761f0995a300f57a73a9561d | import _winreg
import os
def get_shared_cache_folder():
"""
Look in the registry for the configured cache folder.
If there is no entry, None is returned.
:return: the configured cache folder path, or None if it is not set.
"""
_winreg.aReg = _winreg.ConnectRegistry(None, _winreg.HKEY_CURRENT_USER)
try:
key = _winreg.OpenKey(_winreg.aReg, r"SOFTWARE\CCP\EVEONLINE")
path, _ = _winreg.QueryValueEx(key, "CACHEFOLDER")
except OSError:
return None
return path
def set_shared_cache_folder(folder_path):
if not os.path.isdir(folder_path):
try:
os.makedirs(folder_path)
except OSError:
raise ValueError("Could not create directory {}".format(folder_path))
folder_path = os.path.normpath(folder_path) + os.sep
key_eveonline = _winreg.CreateKey(_winreg.aReg, r"SOFTWARE\CCP\EVEONLINE")
_winreg.SetValueEx(key_eveonline, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)
key_eveprobe = _winreg.CreateKey(_winreg.aReg, r"SOFTWARE\CCP\EVEPROBE")
_winreg.SetValueEx(key_eveprobe, "CACHEFOLDER", 0, _winreg.REG_SZ, folder_path)
def get_index_path(hint):
return hint
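# Minimal usage sketch (assumes a Windows host with the EVE Online registry layout above;
# the folder path is only a placeholder):
#
# if get_shared_cache_folder() is None:
#     set_shared_cache_folder(r"C:\EVE\SharedCache")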
| [((11, 19, 11, 75), '_winreg.ConnectRegistry', '_winreg.ConnectRegistry', ({(11, 43, 11, 47): 'None', (11, 49, 11, 74): '_winreg.HKEY_CURRENT_USER'}, {}), '(None, _winreg.HKEY_CURRENT_USER)', False, 'import _winreg\n'), ((28, 20, 28, 78), '_winreg.CreateKey', '_winreg.CreateKey', ({(28, 38, 28, 50): '_winreg.aReg', (28, 52, 28, 77): '"""SOFTWARE\\\\CCP\\\\EVEONLINE"""'}, {}), "(_winreg.aReg, 'SOFTWARE\\\\CCP\\\\EVEONLINE')", False, 'import _winreg\n'), ((29, 4, 29, 84), '_winreg.SetValueEx', '_winreg.SetValueEx', ({(29, 23, 29, 36): 'key_eveonline', (29, 38, 29, 51): '"""CACHEFOLDER"""', (29, 53, 29, 54): '(0)', (29, 56, 29, 70): '_winreg.REG_SZ', (29, 72, 29, 83): 'folder_path'}, {}), "(key_eveonline, 'CACHEFOLDER', 0, _winreg.REG_SZ, folder_path\n )", False, 'import _winreg\n'), ((31, 19, 31, 76), '_winreg.CreateKey', '_winreg.CreateKey', ({(31, 37, 31, 49): '_winreg.aReg', (31, 51, 31, 75): '"""SOFTWARE\\\\CCP\\\\EVEPROBE"""'}, {}), "(_winreg.aReg, 'SOFTWARE\\\\CCP\\\\EVEPROBE')", False, 'import _winreg\n'), ((32, 4, 32, 83), '_winreg.SetValueEx', '_winreg.SetValueEx', ({(32, 23, 32, 35): 'key_eveprobe', (32, 37, 32, 50): '"""CACHEFOLDER"""', (32, 52, 32, 53): '(0)', (32, 55, 32, 69): '_winreg.REG_SZ', (32, 71, 32, 82): 'folder_path'}, {}), "(key_eveprobe, 'CACHEFOLDER', 0, _winreg.REG_SZ, folder_path)", False, 'import _winreg\n'), ((13, 14, 13, 70), '_winreg.OpenKey', '_winreg.OpenKey', ({(13, 30, 13, 42): '_winreg.aReg', (13, 44, 13, 69): '"""SOFTWARE\\\\CCP\\\\EVEONLINE"""'}, {}), "(_winreg.aReg, 'SOFTWARE\\\\CCP\\\\EVEONLINE')", False, 'import _winreg\n'), ((14, 18, 14, 58), '_winreg.QueryValueEx', '_winreg.QueryValueEx', ({(14, 39, 14, 42): 'key', (14, 44, 14, 57): '"""CACHEFOLDER"""'}, {}), "(key, 'CACHEFOLDER')", False, 'import _winreg\n'), ((21, 11, 21, 37), 'os.path.isdir', 'os.path.isdir', ({(21, 25, 21, 36): 'folder_path'}, {}), '(folder_path)', False, 'import os\n'), ((26, 18, 26, 47), 'os.path.normpath', 'os.path.normpath', ({(26, 35, 26, 46): 'folder_path'}, {}), '(folder_path)', False, 'import os\n'), ((23, 12, 23, 36), 'os.makedirs', 'os.makedirs', ({(23, 24, 23, 35): 'folder_path'}, {}), '(folder_path)', False, 'import os\n')] |
yuta-komura/vishnu | venv/lib/python3.8/site-packages/dateparser/data/date_translation_data/ebu.py | 67173b674d5f4f3be189474103612447ef69ab44 | # -*- coding: utf-8 -*-
info = {
"name": "ebu",
"date_order": "DMY",
"january": [
"mweri wa mbere",
"mbe"
],
"february": [
"mweri wa kaĩri",
"kai"
],
"march": [
"mweri wa kathatũ",
"kat"
],
"april": [
"mweri wa kana",
"kan"
],
"may": [
"mweri wa gatano",
"gat"
],
"june": [
"mweri wa gatantatũ",
"gan"
],
"july": [
"mweri wa mũgwanja",
"mug"
],
"august": [
"mweri wa kanana",
"knn"
],
"september": [
"mweri wa kenda",
"ken"
],
"october": [
"mweri wa ikũmi",
"iku"
],
"november": [
"mweri wa ikũmi na ũmwe",
"imw"
],
"december": [
"mweri wa ikũmi na kaĩrĩ",
"igi"
],
"monday": [
"njumatatu",
"tat"
],
"tuesday": [
"njumaine",
"ine"
],
"wednesday": [
"njumatano",
"tan"
],
"thursday": [
"aramithi",
"arm"
],
"friday": [
"njumaa",
"maa"
],
"saturday": [
"njumamothii",
"nmm"
],
"sunday": [
"kiumia",
"kma"
],
"am": [
"ki"
],
"pm": [
"ut"
],
"year": [
"mwaka"
],
"month": [
"mweri"
],
"week": [
"kiumia"
],
"day": [
"mũthenya"
],
"hour": [
"ithaa"
],
"minute": [
"ndagĩka"
],
"second": [
"sekondi"
],
"relative-type": {
"1 year ago": [
"last year"
],
"0 year ago": [
"this year"
],
"in 1 year": [
"next year"
],
"1 month ago": [
"last month"
],
"0 month ago": [
"this month"
],
"in 1 month": [
"next month"
],
"1 week ago": [
"last week"
],
"0 week ago": [
"this week"
],
"in 1 week": [
"next week"
],
"1 day ago": [
"ĩgoro"
],
"0 day ago": [
"ũmũnthĩ"
],
"in 1 day": [
"rũciũ"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 second ago": [
"now"
]
},
"locale_specific": {},
"skip": [
" ",
".",
",",
";",
"-",
"/",
"'",
"|",
"@",
"[",
"]",
","
]
}
| [] |
debatelab/deepa2 | deepa2/preptrain/__init__.py | 1a9e8c357d7e3924808c703ec9f4a6611a4b5f93 | """Preprocessing DeepA2 datasets for LM training"""
# flake8: noqa
from deepa2.preptrain.t2tpreprocessor import T2TPreprocessor
| [] |
martinfarrow/awspk | setup.py | c3b5f8ede44ca96473b95f52ddb2291a45828565 | #!/usr/bin/env python3
from setuptools import setup, find_packages
setup(name='awspk',
version='0.1',
description='An AWS CLI pen knife with loads of interesting stuff',
author='Martin Farrow',
author_email='[email protected]',
py_modules=['awspk'],
license='LICENSE',
)
| [((5, 0, 12, 5), 'setuptools.setup', 'setup', (), '', False, 'from setuptools import setup, find_packages\n')] |
JayRovacsek/pyautoclick | pyclicker/lib/python3.7/site-packages/Xlib/display.py | e136a58c129332933eb8455dd7c8e16222d54fb2 | # Xlib.display -- high level display object
#
# Copyright (C) 2000 Peter Liljenberg <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
# Python modules
import types
# Python 2/3 compatibility.
from six import create_unbound_method
# Xlib modules
from . import error
from . import ext
from . import X
# Xlib.protocol modules
from .protocol import display as protocol_display
from .protocol import request, event, rq
# Xlib.xobjects modules
from .xobject import resource
from .xobject import drawable
from .xobject import fontable
from .xobject import colormap
from .xobject import cursor
_resource_baseclasses = {
'resource': resource.Resource,
'drawable': drawable.Drawable,
'window': drawable.Window,
'pixmap': drawable.Pixmap,
'fontable': fontable.Fontable,
'font': fontable.Font,
'gc': fontable.GC,
'colormap': colormap.Colormap,
'cursor': cursor.Cursor,
}
_resource_hierarchy = {
'resource': ('drawable', 'window', 'pixmap',
'fontable', 'font', 'gc',
'colormap', 'cursor'),
'drawable': ('window', 'pixmap'),
'fontable': ('font', 'gc')
}
class _BaseDisplay(protocol_display.Display):
resource_classes = _resource_baseclasses.copy()
# Implement a cache of atom names, used by Window objects when
# dealing with some ICCCM properties not defined in Xlib.Xatom
def __init__(self, *args, **keys):
protocol_display.Display.__init__(self, *args, **keys)
self._atom_cache = {}
def get_atom(self, atomname, only_if_exists=0):
if atomname in self._atom_cache:
return self._atom_cache[atomname]
r = request.InternAtom(display = self, name = atomname, only_if_exists = only_if_exists)
# don't cache NONE responses in case someone creates this later
if r.atom != X.NONE:
self._atom_cache[atomname] = r.atom
return r.atom
class Display(object):
def __init__(self, display = None):
self.display = _BaseDisplay(display)
# Create the keymap cache
self._keymap_codes = [()] * 256
self._keymap_syms = {}
self._update_keymap(self.display.info.min_keycode,
(self.display.info.max_keycode
- self.display.info.min_keycode + 1))
# Translations for keysyms to strings.
self.keysym_translations = {}
# Find all supported extensions
self.extensions = []
self.class_extension_dicts = {}
self.display_extension_methods = {}
# a dict that maps the event name to the code
# or, when it's an event with a subcode, to a tuple of (event,subcode)
# note this wraps the dict so you address it as
# extension_event.EXTENSION_EVENT_NAME rather than
# extension_event["EXTENSION_EVENT_NAME"]
self.extension_event = rq.DictWrapper({})
exts = self.list_extensions()
# Go through all extension modules
for extname, modname in ext.__extensions__:
if extname in exts:
# Import the module and fetch it
__import__('Xlib.ext.' + modname)
mod = getattr(ext, modname)
info = self.query_extension(extname)
self.display.set_extension_major(extname, info.major_opcode)
# Call initialiasation function
mod.init(self, info)
self.extensions.append(extname)
# Finalize extensions by creating new classes
for class_name, dictionary in self.class_extension_dicts.items():
origcls = self.display.resource_classes[class_name]
self.display.resource_classes[class_name] = type(origcls.__name__,
(origcls,),
dictionary)
# Problem: we have already created some objects without the
# extensions: the screen roots and default colormaps.
# Fix that by reinstantiating them.
for screen in self.display.info.roots:
screen.root = self.display.resource_classes['window'](self.display, screen.root.id)
screen.default_colormap = self.display.resource_classes['colormap'](self.display, screen.default_colormap.id)
def get_display_name(self):
"""Returns the name used to connect to the server, either
provided when creating the Display object, or fetched from the
environment variable $DISPLAY."""
return self.display.get_display_name()
def fileno(self):
"""Returns the file descriptor number of the underlying socket.
This method is provided to allow Display objects to be passed to
select.select()."""
return self.display.fileno()
def close(self):
"""Close the display, freeing the resources that it holds."""
self.display.close()
def set_error_handler(self, handler):
"""Set the default error handler which will be called for all
unhandled errors. handler should take two arguments as a normal
request error handler, but the second argument (the request) will
be None. See section Error Handling."""
self.display.set_error_handler(handler)
def flush(self):
"""Flush the request queue, building and sending the queued
requests. This can be necessary in applications that never wait
for events, and in threaded applications."""
self.display.flush()
def sync(self):
"""Flush the queue and wait until the server has processed all
the queued requests. Use this e.g. when it is important that
errors caused by a certain request is trapped."""
# Do a light-weight replyrequest to sync. There must
# be a better way to do it...
self.get_pointer_control()
def next_event(self):
"""Return the next event. If there are no events queued, it will
block until the next event is fetched from the server."""
return self.display.next_event()
def pending_events(self):
"""Return the number of events queued, i.e. the number of times
that Display.next_event() can be called without blocking."""
return self.display.pending_events()
def has_extension(self, extension):
"""Check if both the server and the client library support the X
extension named extension."""
return extension in self.extensions
def create_resource_object(self, type, id):
"""Create a resource object of type for the integer id. type
should be one of the following strings:
resource
drawable
window
pixmap
fontable
font
gc
colormap
cursor
This function can be used when a resource ID has been fetched
e.g. from a resource or a command line argument. Resource
objects should never be created by instantiating the appropriate
class directly, since any X extensions dynamically added by the
library will not be available.
"""
return self.display.resource_classes[type](self.display, id)
# We need this to handle display extension methods
def __getattr__(self, attr):
try:
function = self.display_extension_methods[attr]
return types.MethodType(function, self)
except KeyError:
raise AttributeError(attr)
###
### display information retrieval
###
def screen(self, sno = None):
if sno is None:
return self.display.info.roots[self.display.default_screen]
else:
return self.display.info.roots[sno]
def screen_count(self):
"""Return the total number of screens on the display."""
return len(self.display.info.roots)
def get_default_screen(self):
"""Return the number of the default screen, extracted from the
display name."""
return self.display.get_default_screen()
###
### Extension module interface
###
def extension_add_method(self, object, name, function):
"""extension_add_method(object, name, function)
Add an X extension module method. OBJECT is the type of
object to add the function to, a string from this list:
display
resource
drawable
window
pixmap
fontable
font
gc
colormap
cursor
NAME is the name of the method, a string. FUNCTION is a
normal function whose first argument is a 'self'.
"""
if object == 'display':
if hasattr(self, name):
raise AssertionError('attempting to replace display method: %s' % name)
self.display_extension_methods[name] = function
else:
class_list = (object, ) + _resource_hierarchy.get(object, ())
for class_name in class_list:
cls = _resource_baseclasses[class_name]
if hasattr(cls, name):
raise AssertionError('attempting to replace %s method: %s' % (class_name, name))
method = create_unbound_method(function, cls)
# Maybe should check extension overrides too
try:
self.class_extension_dicts[class_name][name] = method
except KeyError:
self.class_extension_dicts[class_name] = { name: method }
def extension_add_event(self, code, evt, name = None):
"""extension_add_event(code, evt, [name])
Add an extension event. CODE is the numeric code, and EVT is
the event class. EVT will be cloned, and the attribute _code
of the new event class will be set to CODE.
If NAME is omitted, it will be set to the name of EVT. This
name is used to insert an entry in the DictWrapper
extension_event.
"""
newevt = type(evt.__name__, evt.__bases__,
evt.__dict__.copy())
newevt._code = code
self.display.add_extension_event(code, newevt)
if name is None:
name = evt.__name__
setattr(self.extension_event, name, code)
def extension_add_subevent(self, code, subcode, evt, name = None):
"""extension_add_subevent(code, evt, [name])
Add an extension subevent. CODE is the numeric code, subcode
is the sub-ID of this event that shares the code ID with other
sub-events and EVT is the event class. EVT will be cloned, and
the attribute _code of the new event class will be set to CODE.
If NAME is omitted, it will be set to the name of EVT. This
name is used to insert an entry in the DictWrapper
extension_event.
"""
newevt = type(evt.__name__, evt.__bases__,
evt.__dict__.copy())
newevt._code = code
self.display.add_extension_event(code, newevt, subcode)
if name is None:
name = evt.__name__
# store subcodes as a tuple of (event code, subcode) in the
# extension dict maintained in the display object
setattr(self.extension_event, name, (code,subcode))
def add_extension_error(self, code, err):
"""add_extension_error(code, err)
Add an extension error. CODE is the numeric code, and ERR is
the error class.
"""
self.display.add_extension_error(code, err)
###
### keymap cache implementation
###
# The keycode->keysym map is stored in a list with 256 elements.
# Each element represents a keycode, and the tuple elements are
# the keysyms bound to the key.
# The keysym->keycode map is stored in a mapping, where the keys
# are keysyms. The values are a sorted list of tuples with two
# elements each: (index, keycode)
# keycode is the code for a key to which this keysym is bound, and
# index is the keysyms index in the map for that keycode.
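# Illustration (values depend on the server keymap and are only an example of the
# structures described above): if keycode 38 carries (XK_a, XK_A), then
# _keymap_codes[38][0] is XK_a and _keymap_syms[XK_a] contains the tuple (0, 38).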
def keycode_to_keysym(self, keycode, index):
"""Convert a keycode to a keysym, looking in entry index.
Normally index 0 is unshifted, 1 is shifted, 2 is alt grid, and 3
is shift+alt grid. If that key entry is not bound, X.NoSymbol is
returned."""
try:
return self._keymap_codes[keycode][index]
except IndexError:
return X.NoSymbol
def keysym_to_keycode(self, keysym):
"""Look up the primary keycode that is bound to keysym. If
several keycodes are found, the one with the lowest index and
lowest code is returned. If keysym is not bound to any key, 0 is
returned."""
try:
return self._keymap_syms[keysym][0][1]
except (KeyError, IndexError):
return 0
def keysym_to_keycodes(self, keysym):
"""Look up all the keycodes that is bound to keysym. A list of
tuples (keycode, index) is returned, sorted primarily on the
lowest index and secondarily on the lowest keycode."""
try:
# Copy the map list, reversing the arguments
return map(lambda x: (x[1], x[0]), self._keymap_syms[keysym])
except KeyError:
return []
def refresh_keyboard_mapping(self, evt):
"""This method should be called once when a MappingNotify event
is received, to update the keymap cache. evt should be the event
object."""
if isinstance(evt, event.MappingNotify):
if evt.request == X.MappingKeyboard:
self._update_keymap(evt.first_keycode, evt.count)
else:
raise TypeError('expected a MappingNotify event')
def _update_keymap(self, first_keycode, count):
"""Internal function, called to refresh the keymap cache.
"""
# Delete all sym->code maps for the changed codes
lastcode = first_keycode + count
for keysym, codes in self._keymap_syms.items():
i = 0
while i < len(codes):
code = codes[i][1]
if code >= first_keycode and code < lastcode:
del codes[i]
else:
i = i + 1
# Get the new keyboard mapping
keysyms = self.get_keyboard_mapping(first_keycode, count)
# Replace code->sym map with the new map
self._keymap_codes[first_keycode:lastcode] = keysyms
# Update sym->code map
code = first_keycode
for syms in keysyms:
index = 0
for sym in syms:
if sym != X.NoSymbol:
if sym in self._keymap_syms:
symcodes = self._keymap_syms[sym]
symcodes.append((index, code))
symcodes.sort()
else:
self._keymap_syms[sym] = [(index, code)]
index = index + 1
code = code + 1
###
### client-internal keysym to string translations
###
def lookup_string(self, keysym):
"""Return a string corresponding to KEYSYM, or None if no
reasonable translation is found.
"""
s = self.keysym_translations.get(keysym)
if s is not None:
return s
import Xlib.XK
return Xlib.XK.keysym_to_string(keysym)
def rebind_string(self, keysym, newstring):
"""Change the translation of KEYSYM to NEWSTRING.
If NEWSTRING is None, remove old translation if any.
"""
if newstring is None:
try:
del self.keysym_translations[keysym]
except KeyError:
pass
else:
self.keysym_translations[keysym] = newstring
###
### X requests
###
def intern_atom(self, name, only_if_exists = 0):
"""Intern the string name, returning its atom number. If
only_if_exists is true and the atom does not already exist, it
will not be created and X.NONE is returned."""
r = request.InternAtom(display = self.display,
name = name,
only_if_exists = only_if_exists)
return r.atom
def get_atom(self, atom, only_if_exists = 0):
"""Alias for intern_atom, using internal cache"""
return self.display.get_atom(atom, only_if_exists)
def get_atom_name(self, atom):
"""Look up the name of atom, returning it as a string. Will raise
BadAtom if atom does not exist."""
r = request.GetAtomName(display = self.display,
atom = atom)
return r.name
def get_selection_owner(self, selection):
"""Return the window that owns selection (an atom), or X.NONE if
there is no owner for the selection. Can raise BadAtom."""
r = request.GetSelectionOwner(display = self.display,
selection = selection)
return r.owner
def send_event(self, destination, event, event_mask = 0, propagate = 0,
onerror = None):
"""Send a synthetic event to the window destination which can be
a window object, or X.PointerWindow or X.InputFocus. event is the
event object to send, instantiated from one of the classes in
protocol.events. See XSendEvent(3X11) for details.
There is also a Window.send_event() method."""
request.SendEvent(display = self.display,
onerror = onerror,
propagate = propagate,
destination = destination,
event_mask = event_mask,
event = event)
def ungrab_pointer(self, time, onerror = None):
"""elease a grabbed pointer and any queued events. See
XUngrabPointer(3X11)."""
request.UngrabPointer(display = self.display,
onerror = onerror,
time = time)
def change_active_pointer_grab(self, event_mask, cursor, time, onerror = None):
"""Change the dynamic parameters of a pointer grab. See
XChangeActivePointerGrab(3X11)."""
request.ChangeActivePointerGrab(display = self.display,
onerror = onerror,
cursor = cursor,
time = time,
event_mask = event_mask)
def ungrab_keyboard(self, time, onerror = None):
"""Ungrab a grabbed keyboard and any queued events. See
XUngrabKeyboard(3X11)."""
request.UngrabKeyboard(display = self.display,
onerror = onerror,
time = time)
def allow_events(self, mode, time, onerror = None):
"""Release some queued events. mode should be one of
X.AsyncPointer, X.SyncPointer, X.AsyncKeyboard, X.SyncKeyboard,
X.ReplayPointer, X.ReplayKeyboard, X.AsyncBoth, or X.SyncBoth.
time should be a timestamp or X.CurrentTime."""
request.AllowEvents(display = self.display,
onerror = onerror,
mode = mode,
time = time)
def grab_server(self, onerror = None):
"""Disable processing of requests on all other client connections
until the server is ungrabbed. Server grabbing should be avoided
as much as possible."""
request.GrabServer(display = self.display,
onerror = onerror)
def ungrab_server(self, onerror = None):
"""Release the server if it was previously grabbed by this client."""
request.UngrabServer(display = self.display,
onerror = onerror)
def warp_pointer(self, x, y, src_window = X.NONE, src_x = 0, src_y = 0,
src_width = 0, src_height = 0, onerror = None):
"""Move the pointer relative its current position by the offsets
(x, y). However, if src_window is a window the pointer is only
moved if the specified rectangle in src_window contains it. If
src_width is 0 it will be replaced with the width of src_window -
src_x. src_height is treated in a similar way.
To move the pointer to absolute coordinates, use Window.warp_pointer()."""
request.WarpPointer(display = self.display,
onerror = onerror,
src_window = src_window,
dst_window = X.NONE,
src_x = src_x,
src_y = src_y,
src_width = src_width,
src_height = src_height,
dst_x = x,
dst_y = y)
def set_input_focus(self, focus, revert_to, time, onerror = None):
"""Set input focus to focus, which should be a window,
X.PointerRoot or X.NONE. revert_to specifies where the focus
reverts to if the focused window becomes not visible, and should
be X.RevertToParent, RevertToPointerRoot, or RevertToNone. See
XSetInputFocus(3X11) for details.
There is also a Window.set_input_focus()."""
request.SetInputFocus(display = self.display,
onerror = onerror,
revert_to = revert_to,
focus = focus,
time = time)
def get_input_focus(self):
"""Return an object with the following attributes:
focus
The window which currently holds the input
focus, X.NONE or X.PointerRoot.
revert_to
Where the focus will revert, one of X.RevertToParent,
RevertToPointerRoot, or RevertToNone. """
return request.GetInputFocus(display = self.display)
def query_keymap(self):
"""Return a bit vector for the logical state of the keyboard,
where each bit set to 1 indicates that the corresponding key is
currently pressed down. The vector is represented as a list of 32
integers. List item N contains the bits for keys 8N to 8N + 7
with the least significant bit in the byte representing key 8N."""
r = request.QueryKeymap(display = self.display)
return r.map
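    # Worked example of decoding the vector (assuming `bits` is the returned list):
    # keycode `kc` is currently pressed exactly when bit (kc % 8) of bits[kc // 8] is set:
    #   pressed = bool(bits[kc // 8] & (1 << (kc % 8)))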
def open_font(self, name):
"""Open the font identifed by the pattern name and return its
font object. If name does not match any font, None is returned."""
fid = self.display.allocate_resource_id()
ec = error.CatchError(error.BadName)
request.OpenFont(display = self.display,
onerror = ec,
fid = fid,
name = name)
self.sync()
if ec.get_error():
self.display.free_resource_id(fid)
return None
else:
cls = self.display.get_resource_class('font', fontable.Font)
return cls(self.display, fid, owner = 1)
def list_fonts(self, pattern, max_names):
"""Return a list of font names matching pattern. No more than
max_names will be returned."""
r = request.ListFonts(display = self.display,
max_names = max_names,
pattern = pattern)
return r.fonts
def list_fonts_with_info(self, pattern, max_names):
"""Return a list of fonts matching pattern. No more than
max_names will be returned. Each list item represents one font
and has the following properties:
name
The name of the font.
min_bounds
max_bounds
min_char_or_byte2
max_char_or_byte2
default_char
draw_direction
min_byte1
max_byte1
all_chars_exist
font_ascent
font_descent
replies_hint
        See the description of XFontStruct in XGetFontProperty(3X11)
for details on these values.
properties
A list of properties. Each entry has two attributes:
name
The atom identifying this property.
value
A 32-bit unsigned value.
"""
return request.ListFontsWithInfo(display = self.display,
max_names = max_names,
pattern = pattern)
def set_font_path(self, path, onerror = None):
"""Set the font path to path, which should be a list of strings.
If path is empty, the default font path of the server will be
restored."""
request.SetFontPath(display = self.display,
onerror = onerror,
path = path)
def get_font_path(self):
"""Return the current font path as a list of strings."""
r = request.GetFontPath(display = self.display)
return r.paths
def query_extension(self, name):
"""Ask the server if it supports the extension name. If it is
supported an object with the following attributes is returned:
major_opcode
            The major opcode that the requests of this extension use.
        first_event
            The base event code if the extension has additional events, or 0.
        first_error
            The base error code if the extension has additional errors, or 0.
If the extension is not supported, None is returned."""
r = request.QueryExtension(display = self.display,
name = name)
if r.present:
return r
else:
return None
def list_extensions(self):
"""Return a list of all the extensions provided by the server."""
r = request.ListExtensions(display = self.display)
return r.names
def change_keyboard_mapping(self, first_keycode, keysyms, onerror = None):
"""Modify the keyboard mapping, starting with first_keycode.
keysyms is a list of tuples of keysyms. keysyms[n][i] will be
assigned to keycode first_keycode+n at index i."""
request.ChangeKeyboardMapping(display = self.display,
onerror = onerror,
first_keycode = first_keycode,
keysyms = keysyms)
def get_keyboard_mapping(self, first_keycode, count):
"""Return the current keyboard mapping as a list of tuples,
        starting at first_keycode and containing no more than count entries."""
r = request.GetKeyboardMapping(display = self.display,
first_keycode = first_keycode,
count = count)
return r.keysyms
def change_keyboard_control(self, onerror = None, **keys):
"""Change the parameters provided as keyword arguments:
key_click_percent
            The volume of key clicks between 0 (off) and 100 (loud).
-1 will restore default setting.
bell_percent
The base volume of the bell, coded as above.
bell_pitch
The pitch of the bell in Hz, -1 restores the default.
bell_duration
The duration of the bell in milliseconds, -1 restores
the default.
led
led_mode
led_mode should be X.LedModeOff or X.LedModeOn. If led is
provided, it should be a 32-bit mask listing the LEDs that
should change. If led is not provided, all LEDs are changed.
key
auto_repeat_mode
auto_repeat_mode should be one of X.AutoRepeatModeOff,
X.AutoRepeatModeOn, or X.AutoRepeatModeDefault. If key is
provided, that key will be modified, otherwise the global
state for the entire keyboard will be modified."""
request.ChangeKeyboardControl(display = self.display,
onerror = onerror,
attrs = keys)
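    # Usage sketch with the keyword names documented above (assuming `d` is this display):
    #   d.change_keyboard_control(bell_percent = 50,
    #                             auto_repeat_mode = X.AutoRepeatModeOn)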
def get_keyboard_control(self):
"""Return an object with the following attributes:
global_auto_repeat
X.AutoRepeatModeOn or X.AutoRepeatModeOff.
auto_repeats
A list of 32 integers. List item N contains the bits for keys
8N to 8N + 7 with the least significant bit in the byte
representing key 8N. If a bit is on, autorepeat is enabled
for the corresponding key.
led_mask
A 32-bit mask indicating which LEDs are on.
key_click_percent
The volume of key click, from 0 to 100.
bell_percent
bell_pitch
bell_duration
The volume, pitch and duration of the bell. """
return request.GetKeyboardControl(display = self.display)
def bell(self, percent = 0, onerror = None):
"""Ring the bell at the volume percent which is relative the base
volume. See XBell(3X11)."""
request.Bell(display = self.display,
onerror = onerror,
percent = percent)
def change_pointer_control(self, accel = None, threshold = None, onerror = None):
"""To change the pointer acceleration, set accel to a tuple (num,
denum). The pointer will then move num/denum times the normal
speed if it moves beyond the threshold number of pixels at once.
To change the threshold, set it to the number of pixels. -1
restores the default."""
if accel is None:
do_accel = 0
accel_num = 0
accel_denum = 0
else:
do_accel = 1
accel_num, accel_denum = accel
if threshold is None:
do_threshold = 0
else:
do_threshold = 1
request.ChangePointerControl(display = self.display,
onerror = onerror,
do_accel = do_accel,
do_thres = do_threshold,
accel_num = accel_num,
accel_denum = accel_denum,
threshold = threshold)
def get_pointer_control(self):
"""Return an object with the following attributes:
accel_num
accel_denom
            The acceleration as numerator/denominator.
threshold
The number of pixels the pointer must move before the
acceleration kicks in."""
return request.GetPointerControl(display = self.display)
def set_screen_saver(self, timeout, interval, prefer_blank, allow_exposures, onerror = None):
"""See XSetScreenSaver(3X11)."""
request.SetScreenSaver(display = self.display,
onerror = onerror,
timeout = timeout,
interval = interval,
prefer_blank = prefer_blank,
allow_exposures = allow_exposures)
def get_screen_saver(self):
"""Return an object with the attributes timeout, interval,
prefer_blanking, allow_exposures. See XGetScreenSaver(3X11) for
details."""
return request.GetScreenSaver(display = self.display)
def change_hosts(self, mode, host_family, host, onerror = None):
"""mode is either X.HostInsert or X.HostDelete. host_family is
one of X.FamilyInternet, X.FamilyDECnet or X.FamilyChaos.
host is a list of bytes. For the Internet family, it should be the
four bytes of an IPv4 address."""
request.ChangeHosts(display = self.display,
onerror = onerror,
mode = mode,
host_family = host_family,
host = host)
def list_hosts(self):
"""Return an object with the following attributes:
mode
X.EnableAccess if the access control list is used, X.DisableAccess otherwise.
hosts
The hosts on the access list. Each entry has the following attributes:
family
X.FamilyInternet, X.FamilyDECnet, or X.FamilyChaos.
name
A list of byte values, the coding depends on family. For the Internet family, it is the 4 bytes of an IPv4 address.
"""
return request.ListHosts(display = self.display)
def set_access_control(self, mode, onerror = None):
"""Enable use of access control lists at connection setup if mode
is X.EnableAccess, disable if it is X.DisableAccess."""
request.SetAccessControl(display = self.display,
onerror = onerror,
mode = mode)
def set_close_down_mode(self, mode, onerror = None):
"""Control what will happen with the client's resources at
connection close. The default is X.DestroyAll, the other values
are X.RetainPermanent and X.RetainTemporary."""
request.SetCloseDownMode(display = self.display,
onerror = onerror,
mode = mode)
def force_screen_saver(self, mode, onerror = None):
"""If mode is X.ScreenSaverActive the screen saver is activated.
If it is X.ScreenSaverReset, the screen saver is deactivated as
if device input had been received."""
request.ForceScreenSaver(display = self.display,
onerror = onerror,
mode = mode)
def set_pointer_mapping(self, map):
"""Set the mapping of the pointer buttons. map is a list of
logical button numbers. map must be of the same length as the
list returned by Display.get_pointer_mapping().
map[n] sets the
logical number for the physical button n+1. Logical number 0
disables the button. Two physical buttons cannot be mapped to the
same logical number.
        If one of the buttons to be altered is
logically in the down state, X.MappingBusy is returned and the
mapping is not changed. Otherwise the mapping is changed and
X.MappingSuccess is returned."""
r = request.SetPointerMapping(display = self.display,
map = map)
return r.status
def get_pointer_mapping(self):
"""Return a list of the pointer button mappings. Entry N in the
list sets the logical button number for the physical button N+1."""
r = request.GetPointerMapping(display = self.display)
return r.map
def set_modifier_mapping(self, keycodes):
"""Set the keycodes for the eight modifiers X.Shift, X.Lock,
X.Control, X.Mod1, X.Mod2, X.Mod3, X.Mod4 and X.Mod5. keycodes
        should be an eight-element list where each entry is a list of the
keycodes that should be bound to that modifier.
If any changed
key is logically in the down state, X.MappingBusy is returned and
the mapping is not changed. If the mapping violates some server
restriction, X.MappingFailed is returned. Otherwise the mapping
is changed and X.MappingSuccess is returned."""
r = request.SetModifierMapping(display = self.display,
keycodes = keycodes)
return r.status
def get_modifier_mapping(self):
"""Return a list of eight lists, one for each modifier. The list
can be indexed using X.ShiftMapIndex, X.Mod1MapIndex, and so on.
The sublists list the keycodes bound to that modifier."""
r = request.GetModifierMapping(display = self.display)
return r.keycodes
def no_operation(self, onerror = None):
"""Do nothing but send a request to the server."""
request.NoOperation(display = self.display,
onerror = onerror)
| [((225, 19, 225, 51), 'types.MethodType', 'types.MethodType', ({(225, 36, 225, 44): 'function', (225, 46, 225, 50): 'self'}, {}), '(function, self)', False, 'import types\n'), ((286, 25, 286, 61), 'six.create_unbound_method', 'create_unbound_method', ({(286, 47, 286, 55): 'function', (286, 57, 286, 60): 'cls'}, {}), '(function, cls)', False, 'from six import create_unbound_method\n')] |
KATO-Hiro/AtCoder | Others/qupc/qupc2014/c/main.py | cbbdb18e95110b604728a54aed83a6ed6b993fde | # -*- coding: utf-8 -*-
def main():
from string import ascii_uppercase
n, m, q_large = map(int, input().split())
s = [list(input()) for _ in range(n)]
q = [input() for _ in range(q_large)]
pos = [None for _ in range(26)]
for i in range(n):
for j in range(m):
sij = s[i][j]
if sij != "*":
index = ascii_uppercase.index(sij)
pos[index] = (i + 1, j + 1)
for qi in q:
index = ascii_uppercase.index(qi)
p = pos[index]
if p is None:
print("NA")
else:
print(p[0], p[1])
if __name__ == "__main__":
main()
| [((20, 16, 20, 41), 'string.ascii_uppercase.index', 'ascii_uppercase.index', ({(20, 38, 20, 40): 'qi'}, {}), '(qi)', False, 'from string import ascii_uppercase\n'), ((16, 24, 16, 50), 'string.ascii_uppercase.index', 'ascii_uppercase.index', ({(16, 46, 16, 49): 'sij'}, {}), '(sij)', False, 'from string import ascii_uppercase\n')] |
dimddev/NetCatKS-CP | NetCatKS/DProtocol/api/interfaces/subscribers/__init__.py | 2d9e72b2422e344569fd4eb154866b98e9707561 | __author__ = 'dimd'
from zope.interface import Interface, Attribute
class IBaseResourceSubscriber(Interface):
"""
    IBaseResourceSubscriber provides functionality for comparing the signature of
    an incoming request against a candidate DProtocol implementation registered as
    IJSONResource.
    The `adapter` is the first argument of the constructor. It follows the adapter pattern
    and has to be of type IJSONResource.
    The `protocol` attribute is designed to be provided by classes which implement IJSONResourceSubscriber
    or inherit from DProtocolSubscriber. If a subclass does not provide the protocol attribute,
    an AttributeError will be raised.
"""
adapter = Attribute("The implementer have to provide implementation of IJSONResource")
protocol = Attribute("DProtocol instance")
def compare():
"""
        Designed to compare the adapter and the DProtocol signature
        and determine whether the signatures are equal
"""
class IJSONResourceSubscriber(Interface):
"""
"""
class IXMLResourceSubscriber(Interface):
"""
""" | [((23, 14, 23, 90), 'zope.interface.Attribute', 'Attribute', ({(23, 24, 23, 89): '"""The implementer have to provide implementation of IJSONResource"""'}, {}), "('The implementer have to provide implementation of IJSONResource')", False, 'from zope.interface import Interface, Attribute\n'), ((24, 15, 24, 46), 'zope.interface.Attribute', 'Attribute', ({(24, 25, 24, 45): '"""DProtocol instance"""'}, {}), "('DProtocol instance')", False, 'from zope.interface import Interface, Attribute\n')] |
dpedrosac/DBSgait | analysis/notebooks/helper/anova.py | 6df44cf975d43f9e932ef10144bfb7c1b5390b7b | import numpy as np
import pandas as pd
from scipy.stats import f_oneway
from typing import Dict, Tuple, Set
def extract_significant_p(df: pd.DataFrame, p_value_limit: float):
"""Return a df, which replaces values that are above p_value_limit with `None`"""
return (
df.loc(axis=1)[f"p-value"]
.where(df[f"p-value"] < p_value_limit)
.dropna(axis=0, how="all")
)
def _calculate_anova(data: pd.DataFrame) -> Tuple:
"""Calculate one-way anova using each column as a different measurement."""
parameter = [column for column in data.columns if column != "configuration"][0]
data_ = [
data[data["configuration"] == configuration][parameter].T.to_numpy()
for configuration in set(data["configuration"])
]
return f_oneway(*data_)
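# Minimal usage sketch (the column name "stride_length" is hypothetical): for a dataframe
# holding one gait-parameter column plus a "configuration" column, scipy's f_oneway result
# unpacks into an F-statistic and a p-value:
#   f_stat, p_value = _calculate_anova(df[["stride_length", "configuration"]])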
def anova(
dataset: Dict, gait_test: str, gait_parameter: str
) -> Tuple[pd.DataFrame, Set]:
"""Calculat a one-way anova for a single gait test and gait parameter.
Parameters
----------
dataset
A dictionary, where the keys are descriptions for different subjects. The values are dataframes, which have a
pd.MultiIndex as columns. The first level describes the test paradigm, e.g. "slow" / "fast". The second level
        describes the DBS configuration, e.g. "130", "100", "OFF". The third level is the gait parameter,
e.g. stride length.
gait_test
Used to select the first level of the columns
gait_parameter
        Used to select the third level of the columns
Returns
-------
    anova_df
        A dataframe whose rows are the subjects from `dataset` and whose columns correspond to the two feet.
        The values are anova p-values computed across all DBS configurations (including the OFF state) for
        this specific `gait_test`.
    not_evaluated
        A set of strings describing the test/patient/configuration/foot combinations that could not be evaluated.
"""
anova_dict = {}
anova_df = pd.DataFrame()
not_evaluated = []
for patient, patient_data in dataset.items():
anova_dict[patient] = {"LeftFoot": (None, None), "RightFoot": (None, None)}
for foot in set(patient_data["foot"]):
missing_condition = None
foot_data = patient_data[
(patient_data["foot"] == foot) & (patient_data["test"] == gait_test)
][[gait_parameter, "configuration"]]
possible_configurations = {
"030",
"033",
"040",
"066",
"085",
"090",
"100",
"130",
"OFF",
}
actual_configurations = set(foot_data["configuration"])
missing_configurations = possible_configurations - actual_configurations
if missing_configurations:
not_evaluated.append(
" ".join([gait_test, patient, *missing_configurations, foot])
)
if len(missing_configurations) > (len(possible_configurations) - 2):
print(
"Not evaluating this foot, because to few configurations available."
)
continue
# print(set(foot_data.columns) - set(foot_data_valid.columns))
anova_dict[patient][foot] = _calculate_anova(foot_data)
row = pd.DataFrame(
index=[patient],
columns=pd.MultiIndex.from_arrays(
[["p-value"] * 2, ["LeftFoot", "RightFoot"]]
),
data=[
[
anova_dict[patient]["LeftFoot"][1],
anova_dict[patient]["RightFoot"][1],
]
],
)
anova_df = pd.concat([anova_df, row])
return anova_df, set(not_evaluated)
def conclude_results(
all_results: pd.DataFrame,
p_value_limit: float
) -> pd.DataFrame:
anova_overview = pd.DataFrame()
significant_results = {}
for gait_parameter in all_results.keys():
significant_results[gait_parameter] = extract_significant_p(
all_results[gait_parameter], p_value_limit=p_value_limit
)
data = [
len(all_results[gait_parameter]),
len(significant_results[gait_parameter]),
significant_results[gait_parameter].count().sum(),
]
columns = ["n_patients", "n_patients_significant", "n_feet_significant"]
anova_overview = pd.concat(
[
anova_overview,
pd.DataFrame(data=[data], columns=columns, index=[gait_parameter]),
]
)
return anova_overview
| [((24, 11, 24, 27), 'scipy.stats.f_oneway', 'f_oneway', ({(24, 20, 24, 26): '*data_'}, {}), '(*data_)', False, 'from scipy.stats import f_oneway\n'), ((54, 15, 54, 29), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((109, 21, 109, 35), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((101, 19, 101, 45), 'pandas.concat', 'pd.concat', ({(101, 29, 101, 44): '[anova_df, row]'}, {}), '([anova_df, row])', True, 'import pandas as pd\n'), ((91, 20, 93, 13), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', ({(92, 16, 92, 60): "[['p-value'] * 2, ['LeftFoot', 'RightFoot']]"}, {}), "([['p-value'] * 2, ['LeftFoot', 'RightFoot']])", True, 'import pandas as pd\n'), ((124, 16, 124, 82), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n')] |
roaldarbol/bux | bux_recorder/utils.py | 356817bbc7139c972d640c64fb8fcba27b70b3f7 | import os
import io
import platform
import time
import csv
import serial
import cv2
import tkinter as tk
from tkinter.filedialog import askdirectory
from serial.tools import list_ports
# From https://raspberrypi.stackexchange.com/a/118473
def is_raspberrypi():
try:
with io.open('/sys/firmware/devicetree/base/model', 'r') as m:
if 'raspberry pi' in m.read().lower():
return(m)
except Exception:
pass
return False
def get_platform():
return platform.system()
def get_gui_coordinates(root, w, h):
# get screen width and height
ws = root.winfo_screenwidth() # width of the screen
hs = root.winfo_screenheight() # height of the screen
# calculate x and y coordinates for the Tk root window
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
return(w,h,x,y)
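# Usage sketch (assumes the caller created a Tk root): centre a 1280x720 window on screen.
#   w, h, x, y = get_gui_coordinates(root, 1280, 720)
#   root.geometry('%dx%d+%d+%d' % (w, h, x, y))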
def handle_focus_in(button):
full_name_entry.delete(0, tk.END)
full_name_entry.config(fg='black')
def handle_focus_out(button):
full_name_entry.delete(0, tk.END)
full_name_entry.config(fg='grey')
full_name_entry.insert(0, "Example: Joe Bloggs")
def hover(button, enter, message):
if message == "":
return
else:
button.configure(text=message)
def list_ports():
"""
    Probe camera indices with OpenCV and return a tuple of (available, working, non-working) port lists.
"""
non_working_ports = []
dev_port = 0
working_ports = []
available_ports = []
while len(non_working_ports) < 6: # if there are more than 5 non working ports stop the testing.
camera = cv2.VideoCapture(dev_port)
if not camera.isOpened():
non_working_ports.append(dev_port)
# print("Port %s is not working." %dev_port)
else:
is_reading, img = camera.read()
w = camera.get(3)
h = camera.get(4)
if is_reading:
# print("Port %s is working and reads images (%s x %s)" %(dev_port,h,w))
working_ports.append(dev_port)
else:
# print("Port %s for camera ( %s x %s) is present but does not reads." %(dev_port,h,w))
available_ports.append(dev_port)
dev_port +=1
return available_ports,working_ports,non_working_ports | [((22, 11, 22, 28), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((58, 17, 58, 43), 'cv2.VideoCapture', 'cv2.VideoCapture', ({(58, 34, 58, 42): 'dev_port'}, {}), '(dev_port)', False, 'import cv2\n')] |
maechler/a2e | a2e/optimizer/hpbandster/_model_worker.py | c28f546ca5fc3fdb9c740ea5f0f85d2aca044a00 | from hpbandster.core.worker import Worker
from a2e.model import AbstractModel
from a2e.optimizer import EvaluationResultAggregator
from a2e.utility import inf_nan_to_float_max
class ModelWorker(Worker):
def __init__(
self,
model: AbstractModel,
evaluation_result_aggregator: EvaluationResultAggregator,
x_train,
y_train,
x_valid,
y_valid,
run_id,
nameserver=None,
nameserver_port=None,
logger=None,
host=None,
id=None,
timeout=None,
):
super().__init__(run_id, nameserver=nameserver, nameserver_port=nameserver_port, logger=logger, host=host, id=id, timeout=timeout)
self.model = model
self.evaluation_result_aggregator = evaluation_result_aggregator
self.x_train = x_train
self.y_train = y_train
self.x_valid = x_valid
self.y_valid = y_valid
def compute(self, config, budget, working_directory, **kwargs):
iteration, stage, actual_num_config = kwargs['config_id']
self.model.load_config(config, budget=budget, **kwargs)
evaluation_result = self.model.evaluate(
self.x_train,
self.y_train,
self.x_valid,
self.y_valid,
budget,
)
evaluation_result.add_info('iteration', iteration)
evaluation_result.add_info('stage', stage)
evaluation_result.add_info('actual_num_config', actual_num_config)
self.evaluation_result_aggregator.add_evaluation_result(evaluation_result)
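        # HpBandSter expects compute() to return a dict with a scalar 'loss' (which the
        # optimizer minimizes) and a free-form 'info' payload; see hpbandster.core.worker.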
return {
'loss': inf_nan_to_float_max(evaluation_result.cost),
'info': evaluation_result.info,
}
| [((51, 20, 51, 64), 'a2e.utility.inf_nan_to_float_max', 'inf_nan_to_float_max', ({(51, 41, 51, 63): 'evaluation_result.cost'}, {}), '(evaluation_result.cost)', False, 'from a2e.utility import inf_nan_to_float_max\n')] |
schissmantics/xagents | xagents/__init__.py | 04f1b96f767903c62138b7d63986f16edfe5f240 | from xagents import a2c, acer, ddpg, dqn, ppo, td3, trpo
from xagents.a2c.agent import A2C
from xagents.acer.agent import ACER
from xagents.base import OffPolicy
from xagents.ddpg.agent import DDPG
from xagents.dqn.agent import DQN
from xagents.ppo.agent import PPO
from xagents.td3.agent import TD3
from xagents.trpo.agent import TRPO
from xagents.utils.cli import play_args, train_args, tune_args
from xagents.utils.common import register_models
__author__ = 'schissmantics'
__email__ = '[email protected]'
__license__ = 'MIT'
__version__ = '1.0.1'
agents = {
'a2c': {'module': a2c, 'agent': A2C},
'acer': {'module': acer, 'agent': ACER},
'dqn': {'module': dqn, 'agent': DQN},
'ppo': {'module': ppo, 'agent': PPO},
'td3': {'module': td3, 'agent': TD3},
'trpo': {'module': trpo, 'agent': TRPO},
'ddpg': {'module': ddpg, 'agent': DDPG},
}
register_models(agents)
commands = {
'train': (train_args, 'fit', 'Train given an agent and environment'),
'play': (
play_args,
'play',
'Play a game given a trained agent and environment',
),
'tune': (
tune_args,
'',
'Tune hyperparameters given an agent, hyperparameter specs, and environment',
),
}
| [((27, 0, 27, 23), 'xagents.utils.common.register_models', 'register_models', ({(27, 16, 27, 22): 'agents'}, {}), '(agents)', False, 'from xagents.utils.common import register_models\n')] |
YonLiud/Israeli-Queue | IsraeliQueue/__init__.py | 53e14e68701c06efdd23ba6584a2e8a561e60cd9 | from .IsraeliQueue import IsraeliQueue, Item, IsraeliQueueByType
| [] |
Este1le/fairseq | examples/MMPT/mmpt_cli/localjob.py | 0fa073e0e0ddd90ff6850588e655c9566bb222ff | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from mmpt.utils import recursive_config
class BaseJob(object):
def __init__(self, yaml_file, dryrun=False):
self.yaml_file = yaml_file
self.config = recursive_config(yaml_file)
self.dryrun = dryrun
def submit(self, **kwargs):
raise NotImplementedError
def _normalize_cmd(self, cmd_list):
cmd_list = list(cmd_list)
yaml_index = cmd_list.index("[yaml]")
cmd_list[yaml_index] = self.yaml_file
return cmd_list
class LocalJob(BaseJob):
CMD_CONFIG = {
"local_single": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
],
"local_small": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
"--distributed-world-size", "2"
],
"local_big": [
"fairseq-train", "[yaml]", "--user-dir", "mmpt",
"--task", "mmtask", "--arch", "mmarch",
"--criterion", "mmloss",
"--distributed-world-size", "4"
],
"local_predict": ["python", "mmpt_cli/predict.py", "[yaml]"],
}
def __init__(self, yaml_file, job_type=None, dryrun=False):
super().__init__(yaml_file, dryrun)
if job_type is None:
self.job_type = "local_single"
if self.config.task_type is not None:
self.job_type = self.config.task_type
else:
self.job_type = job_type
if self.job_type in ["local_single", "local_small"]:
if self.config.fairseq.dataset.batch_size > 32:
print("decreasing batch_size to 32 for local testing?")
def submit(self):
cmd_list = self._normalize_cmd(LocalJob.CMD_CONFIG[self.job_type])
if "predict" not in self.job_type:
# append fairseq args.
from mmpt.utils import load_config
config = load_config(config_file=self.yaml_file)
for field in config.fairseq:
for key in config.fairseq[field]:
if key in ["fp16", "reset_optimizer", "reset_dataloader", "reset_meters"]: # a list of binary flag.
param = ["--" + key.replace("_", "-")]
else:
if key == "lr":
value = str(config.fairseq[field][key][0])
elif key == "adam_betas":
value = "'"+str(config.fairseq[field][key])+"'"
else:
value = str(config.fairseq[field][key])
param = [
"--" + key.replace("_", "-"),
value
]
cmd_list.extend(param)
print("launching", " ".join(cmd_list))
if not self.dryrun:
os.system(" ".join(cmd_list))
return JobStatus("12345678")
class JobStatus(object):
def __init__(self, job_id):
self.job_id = job_id
def __repr__(self):
return self.job_id
def __str__(self):
return self.job_id
def done(self):
return False
def running(self):
return False
def result(self):
if self.done():
return "{} is done.".format(self.job_id)
else:
return "{} is running.".format(self.job_id)
def stderr(self):
return self.result()
def stdout(self):
return self.result()
| [((13, 22, 13, 49), 'mmpt.utils.recursive_config', 'recursive_config', ({(13, 39, 13, 48): 'yaml_file'}, {}), '(yaml_file)', False, 'from mmpt.utils import recursive_config\n'), ((67, 21, 67, 60), 'mmpt.utils.load_config', 'load_config', (), '', False, 'from mmpt.utils import load_config\n')] |
sdss/tron | tron/Nubs/deprecated/tcc25m-old.py | 886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322 | import os.path
from tron import g, hub
from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder
from tron.Hub.Nub.TCCShellNub import TCCShellNub
from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder
name = 'tcc'
def start(poller):
stop()
initCmds = ('show version', 'show users', 'show time', 'show status', 'show inst/full',
'show object/full', 'show axisconfig', 'show focus', 'axis status', 'show scale',
'mir status')
safeCmds = r'(^show )|(status$)'
d = ASCIIReplyDecoder(EOL='\r', stripChars='\n', CIDfirst=False, debug=1)
e = ASCIICmdEncoder(EOL='\r', debug=1, CIDfirst=False)
tcc = TCCShellNub(poller, [
'/usr/bin/ssh', '-1', '-e', 'none', '-a', '-x', '-i',
os.path.expanduser('~/.ssh/tron'), '-T', 'tccuser@tcc25m'
],
initCmds=initCmds,
safeCmds=safeCmds,
needsAuth=True,
name=name,
encoder=e,
decoder=d,
logDir=os.path.join(g.logDir, name),
debug=1)
hub.addActor(tcc)
def stop():
n = hub.findActor(name)
if n:
hub.dropActor(n)
del n
| [((22, 8, 22, 77), 'tron.Hub.Reply.Decoders.ASCIIReplyDecoder.ASCIIReplyDecoder', 'ASCIIReplyDecoder', (), '', False, 'from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder\n'), ((23, 8, 23, 58), 'tron.Hub.Command.Encoders.ASCIICmdEncoder.ASCIICmdEncoder', 'ASCIICmdEncoder', (), '', False, 'from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder\n'), ((37, 4, 37, 21), 'tron.hub.addActor', 'hub.addActor', ({(37, 17, 37, 20): 'tcc'}, {}), '(tcc)', False, 'from tron import g, hub\n'), ((41, 8, 41, 27), 'tron.hub.findActor', 'hub.findActor', ({(41, 22, 41, 26): 'name'}, {}), '(name)', False, 'from tron import g, hub\n'), ((43, 8, 43, 24), 'tron.hub.dropActor', 'hub.dropActor', ({(43, 22, 43, 23): 'n'}, {}), '(n)', False, 'from tron import g, hub\n')] |
ptphp/PyLib | src/PtDb/test.py | 07ac99cf2deb725475f5771b123b9ea1375f5e65 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
'''
Created on 2013-3-31
@author: Joseph
'''
import PtDb
if __name__ == '__main__':
PtDb.config = {
'sqlite':{
'type':'sqlite',
'dbname':"data1.db"
},
'default':{
'type':'mysql',
'host':'localhost',
'port':3306,
'dbname':'game110_dev',
'dbuser':'root',
'dbpass':'root',
'charset':'utf8',
},
'default1':{
'type':'mysql',
'host':'localhost',
'port':3306,
'dbname':'game110_dev',
'dbuser':'root',
'dbpass':'root',
'charset':'utf8',
},
}
PtDb.init('sqlite').open("test.db")
PtDb.init('sqlite').open("test1.db")
PtDb.init()
print PtDb.init().getAll("select * from orders")
print PtDb.init().getOne("select * from orders limit 1")
| [] |
honzajavorek/oci-cli | services/object_storage/tests/integ/test_object_storage_bulk_operations.py | 6ea058afba323c6b3b70e98212ffaebb0d31985e | # coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import filecmp
import json
import pytest
import oci
import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage
import os
import random
import shutil
import six
import string
from tests import util
from tests import test_config_container
from mimetypes import guess_type
OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET = 100
OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT = 20
CONTENT_STRING_LENGTH = 5000
MID_SIZED_FILE_IN_MEBIBTYES = 20
LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES = 150 # Default multipart is 128MiB
# Holds the objects we create and their content so that we can verify results
bulk_get_object_to_content = {}
bulk_get_prefix_to_object = {
'a/b/c/d': [],
'a/b/c': [],
'a/b': [],
'/a': [],
'': []
}
bulk_get_bucket_name = None
bulk_put_large_files = set()
bulk_put_mid_sized_files = set()
root_bulk_put_folder = None
bulk_put_bucket_name = None
@pytest.fixture
def vcr_fixture(request):
with test_config_container.create_vcr(cassette_library_dir='services/object_storage/tests/cassettes').use_cassette('object_storage_bulk_operations_{name}.yml'.format(name=request.function.__name__)):
yield
# Generate test data for different operations:
#
# Bulk Get: create a new bucket and populate it with some objects, then tear it all down afterwards
# Bulk Put: create a folder structure containing small and large files, then tear it all down afterwards
# Bulk Delete: uses the folders and files generated for bulk put
@pytest.fixture(scope='module', autouse=True)
def generate_test_data(object_storage_client):
global bulk_get_object_to_content, bulk_get_bucket_name, root_bulk_put_folder, bulk_put_large_files, bulk_put_mid_sized_files, bulk_put_bucket_name
# Create a test bucket
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkGetTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
bulk_get_bucket_name = create_bucket_request.name
    # Create items at various hierarchy levels (to be surfaced as different directories on disk)
for i in range(OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET):
if i % 5 == 4:
object_name = 'a/b/c/d/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b/c/d'].append(object_name)
elif i % 5 == 3:
object_name = 'a/b/c/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b/c'].append(object_name)
elif i % 5 == 2:
object_name = 'a/b/Object_{}'.format(i)
bulk_get_prefix_to_object['a/b'].append(object_name)
elif i % 5 == 1:
# This is equivalent to a/ on the file system because we drop the leading slash (we drop path separators from the front to avoid unexpected results)
object_name = '/a/Object_{}'.format(i)
bulk_get_prefix_to_object['/a'].append(object_name)
else:
# At the root of the bucket
object_name = 'Object_{}'.format(i)
bulk_get_prefix_to_object[''].append(object_name)
object_content = generate_random_string(CONTENT_STRING_LENGTH)
object_storage_client.put_object(util.NAMESPACE, create_bucket_request.name, object_name, object_content)
bulk_get_object_to_content[object_name] = object_content
# makedirs creates all subfolders recursively
root_bulk_put_folder = 'tests/temp/bulk_put_{}'.format(util.random_number_string())
bulk_put_folder_leaf = '{}/subfolder1/subfolder2/subfolder3'.format(root_bulk_put_folder)
if not os.path.exists(bulk_put_folder_leaf):
os.makedirs(bulk_put_folder_leaf)
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkPutTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
bulk_put_bucket_name = create_bucket_request.name
subfolders = ['', 'subfolder1', 'subfolder1/subfolder2', 'subfolder1/subfolder2/subfolder3']
for subfolder in subfolders:
if subfolder == '':
full_folder = root_bulk_put_folder
else:
full_folder = os.path.join(root_bulk_put_folder, subfolder)
for i in range(OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT + 1):
file_path = '{}/object_{}'.format(full_folder, i)
if i != 0 and i % OBJECTS_TO_CREATE_IN_FOLDER_FOR_BULK_PUT == 0:
# Put in one big file per subfolder
util.create_large_file(file_path, LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
bulk_put_large_files.add(file_path)
elif i != 0 and i % 10 == 0:
# Put in the occasional file with a reasonable size so that we can force multipart
util.create_large_file(file_path, MID_SIZED_FILE_IN_MEBIBTYES)
bulk_put_mid_sized_files.add(file_path)
else:
with open(file_path, 'w') as f:
f.write(generate_random_string(CONTENT_STRING_LENGTH))
yield
# Tear down stuff by deleting all the things and then deleting the buckets
delete_bucket_and_all_items(object_storage_client, bulk_get_bucket_name)
delete_bucket_and_all_items(object_storage_client, bulk_put_bucket_name)
# Remove all directories recursively
shutil.rmtree(root_bulk_put_folder)
@util.skip_while_rerecording
def test_normalize_object_name_path():
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('/this/is/a/path')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('/this/is/a/path', '/')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('\\this\\is\\a\\path', '\\')
assert '/this/is/a/path' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('\\this/is/a\\path', '\\')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath', '/')
assert 'thisisapath' == oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage('thisisapath', '\\')
@util.skip_while_rerecording
def test_get_all_objects_in_bucket(vcr_fixture):
download_folder = 'tests/temp/get_all_{}'.format(bulk_get_bucket_name)
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
print(result.output)
# Ensure that content matches
for object_name in bulk_get_object_to_content:
if object_name[0] == '/' or object_name[0] == '\\':
file_path = os.path.join(download_folder, object_name[1:])
else:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_object_to_content) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_directory_and_subdirectories(vcr_fixture):
download_folder = 'tests/temp/get_directory_and_subdirectories_{}'.format(bulk_get_bucket_name)
# This should get us a/b/<object>, a/b/c/<object> and a/b/c/d/<object>
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'a/b'])
for object_name in bulk_get_prefix_to_object['a/b']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
for object_name in bulk_get_prefix_to_object['a/b/c']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
for object_name in bulk_get_prefix_to_object['a/b/c/d']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_prefix_to_object['a/b']) + len(bulk_get_prefix_to_object['a/b/c']) + len(bulk_get_prefix_to_object['a/b/c/d']) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_directory_no_subdirectory(vcr_fixture):
download_folder = 'tests/temp/get_directory_only_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'a/b/c/', '--delimiter', '/'])
for object_name in bulk_get_prefix_to_object['a/b/c']:
file_path = os.path.join(download_folder, object_name)
with open(file_path, 'r') as content_file:
content = content_file.read()
assert content == bulk_get_object_to_content[object_name]
assert len(bulk_get_prefix_to_object['a/b/c']) == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_files_skipped():
download_folder = 'tests/temp/skip_and_replace_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
# Sanity check
assert len(bulk_get_object_to_content) == get_count_of_files_in_folder_and_subfolders(download_folder)
# We should skip over all objects since there is no --overwrite. There should be prompts
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' in result.output
assert len(parsed_result['skipped-objects']) == len(bulk_get_object_to_content)
# We should skip over all objects since we say --no-overwrite. Additionally there should be no prompts
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--no-overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' not in result.output
assert len(parsed_result['skipped-objects']) == len(bulk_get_object_to_content)
# We should skip over no objects since we --overwrite
result = invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert len(parsed_result['skipped-objects']) == 0
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_no_objects(vcr_fixture):
download_folder = 'tests/temp/no_objects_{}'.format(bulk_get_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--download-dir', download_folder, '--prefix', 'batman'])
assert 0 == get_count_of_files_in_folder_and_subfolders(download_folder)
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_get_multipart(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkGetMultipartsTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
large_file_root_dir = os.path.join('tests', 'temp', 'multipart_get_large_files')
if not os.path.exists(large_file_root_dir):
os.makedirs(large_file_root_dir)
util.create_large_file(os.path.join(large_file_root_dir, '1.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '2.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '3.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '4.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '5.bin'), LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)
util.create_large_file(os.path.join(large_file_root_dir, '6.bin'), 1) # Creates a 1 MiB file for variety
invoke([
'os', 'object', 'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--src-dir', large_file_root_dir
])
large_file_verify_dir = os.path.join('tests', 'temp', 'multipart_get_large_files_verify')
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--download-dir', large_file_verify_dir, '--multipart-download-threshold', '128'])
assert get_count_of_files_in_folder_and_subfolders(large_file_verify_dir) == 6
assert filecmp.cmp(os.path.join(large_file_root_dir, '1.bin'), os.path.join(large_file_verify_dir, '1.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '2.bin'), os.path.join(large_file_verify_dir, '2.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '3.bin'), os.path.join(large_file_verify_dir, '3.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '4.bin'), os.path.join(large_file_verify_dir, '4.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '5.bin'), os.path.join(large_file_verify_dir, '5.bin'))
assert filecmp.cmp(os.path.join(large_file_root_dir, '6.bin'), os.path.join(large_file_verify_dir, '6.bin'))
shutil.rmtree(large_file_root_dir)
shutil.rmtree(large_file_verify_dir)
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
# Since we've created a reasonable number of objects in this test suite, it's a good opportunity to test using the --all and --limit parameters
@util.skip_while_rerecording
def test_list_all_objects_operations(vcr_fixture):
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--all'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET
assert 'next-start-with' not in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--all', '--page-size', '20'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == OBJECTS_TO_CREATE_IN_BUCKET_FOR_BULK_GET
assert 'next-start-with' not in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--limit', '47'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == 47
assert 'next-start-with' in result.output
result = invoke(['os', 'object', 'list', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--limit', '33', '--page-size', '3'])
parsed_result = json.loads(result.output)
assert len(parsed_result['data']) == 33
assert 'next-start-with' in result.output
# Bulk puts objects, uses multipart where appropriate (when we breach the default of 128MiB)
@util.skip_while_rerecording
def test_bulk_put_default_options():
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder])
# No failures or skips and we uploaded everything
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
# Pull everything down and verify that the files match (everything in source appears in destination and they are equal)
download_folder = 'tests/temp/verify_files_{}'.format(bulk_put_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder])
object_name_set = set()
for dir_name, subdir_list, file_list in os.walk(root_bulk_put_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(root_bulk_put_folder, download_folder)
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
# Sanity check that we're reporting back that we uploaded the right files
assert get_object_name_from_path(root_bulk_put_folder, source_file_path) in parsed_result['uploaded-objects']
object_name_set.add(get_object_name_from_path(root_bulk_put_folder, source_file_path))
# If we try and put it in the same bucket without --overwrite then everything should be skipped. There should be prompts
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' in result.output
assert set(parsed_result['skipped-objects']) == object_name_set
assert parsed_result['upload-failures'] == {}
assert parsed_result['uploaded-objects'] == {}
# If we say to --no-overwrite then everything should be skipped. There should be no prompts
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--no-overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert 'Are you sure you want to overwrite it?' not in result.output
assert set(parsed_result['skipped-objects']) == object_name_set
assert parsed_result['upload-failures'] == {}
assert parsed_result['uploaded-objects'] == {}
# Now we force it
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--overwrite'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == len(object_name_set)
for object_name in object_name_set:
assert object_name in parsed_result['uploaded-objects']
shutil.rmtree(download_folder)
# Bulk puts objects with --content-type as auto
@util.skip_while_rerecording
def test_bulk_put_auto_content_type():
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--content-type', 'auto', '--overwrite'])
# No failures or skips and we uploaded everything
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
# Pull everything down and verify that the files match (everything in source appears in destination and they are equal)
download_folder = 'tests/temp/verify_files_{}'.format(bulk_put_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder])
object_name_set = set()
for dir_name, subdir_list, file_list in os.walk(root_bulk_put_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(root_bulk_put_folder, download_folder)
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
assert guess_type(source_file_path) == guess_type(downloaded_file_path)
# Sanity check that we're reporting back that we uploaded the right files
assert get_object_name_from_path(root_bulk_put_folder, source_file_path) in parsed_result['uploaded-objects']
object_name_set.add(get_object_name_from_path(root_bulk_put_folder, source_file_path))
shutil.rmtree(download_folder)
# Tests that multipart params are applied:
#
# - Try to upload with a part size of 10MiB (this will force the large and mid-sized files to be multipart uploaded)
# - Try to upload with multipart disabled
@util.skip_while_rerecording
def test_bulk_put_with_multipart_params(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkPutMultipartsTest_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
result = invoke([
'os', 'object', 'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--src-dir', root_bulk_put_folder,
'--part-size', '10'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
result = invoke([
'os', 'object', 'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--src-dir', root_bulk_put_folder,
'--no-multipart',
'--overwrite'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
@util.skip_while_rerecording
def test_bulk_put_with_prefix():
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', root_bulk_put_folder, '--object-prefix', 'bulk_put_prefix_test/'])
# No failures or skips and we uploaded everything
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
assert len(parsed_result['uploaded-objects']) == get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
download_folder = 'tests/temp/verify_files_bulk_put_prefix_{}'.format(bulk_put_bucket_name)
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder, '--prefix', 'bulk_put_prefix_test/'])
actual_download_folder = os.path.join(download_folder, 'bulk_put_prefix_test')
for dir_name, subdir_list, file_list in os.walk(root_bulk_put_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(root_bulk_put_folder, actual_download_folder)
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
# Sanity check that we're reporting back that we uploaded the right files
assert 'bulk_put_prefix_test/{}'.format(get_object_name_from_path(root_bulk_put_folder, source_file_path)) in parsed_result['uploaded-objects']
shutil.rmtree(download_folder)
@util.skip_while_rerecording
def test_bulk_put_with_non_existent_folder():
fake_directory = 'tests/folder/not/exist'
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--src-dir', fake_directory])
assert 'UsageError' in result.output
assert 'The specified --src-dir {} (expanded to: {}) does not exist'.format(fake_directory, fake_directory) in result.output
@util.skip_while_rerecording
def test_bulk_put_get_delete_with_inclusions(object_storage_client):
inclusion_test_folder = os.path.join('tests', 'temp', 'os_bulk_upload_inclusion_test')
if not os.path.exists(inclusion_test_folder):
os.makedirs(inclusion_test_folder)
# Make some files for include/exclude
folders_to_files = {
'': ['test_file1.txt', 'test_file2.png'],
'subfolder': ['blah.pdf', 'hello.txt', 'testfile3.png'],
'subfolder/subfolder2': ['xyz.jpg', 'blag.txt', 'byz.jpg', 'testfile4.png']
}
for folder, files in six.iteritems(folders_to_files):
folder_path = os.path.join(inclusion_test_folder, folder)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for file in files:
file_path = os.path.join(folder_path, file)
with open(file_path, 'w') as f:
                # For non-text extensions this won't create a valid file of that type, but for testing purposes it is OK
f.write(generate_random_string(CONTENT_STRING_LENGTH))
result = invoke([
'os',
'object',
'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--src-dir', inclusion_test_folder,
'--object-prefix', 'inclusion_test/',
'--include', '*.txt', # Matches test_file1.txt, subfolder/hello.txt, subfolder/subfolder2/blag.txt
'--include', 'subfolder/*.png', # Matches subfolder/testfile3.png, subfolder/subfolder2/testfile4.png
'--include', 'subfolder/[b]lah.pdf', # Matches subfolder/blah.pdf
'--include', '*/[ax]yz.jpg' # Matches subfolder/subfolder2/xyz.jpg
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
expected_uploaded_files = [
'{}{}'.format('inclusion_test/', 'test_file1.txt'),
'{}{}'.format('inclusion_test/', 'subfolder/hello.txt'),
'{}{}'.format('inclusion_test/', 'subfolder/subfolder2/blag.txt'),
'{}{}'.format('inclusion_test/', 'subfolder/testfile3.png'),
'{}{}'.format('inclusion_test/', 'subfolder/subfolder2/testfile4.png'),
'{}{}'.format('inclusion_test/', 'subfolder/blah.pdf'),
'{}{}'.format('inclusion_test/', 'subfolder/subfolder2/xyz.jpg')
]
# Check that we uploaded what we said we did
assert len(parsed_result['uploaded-objects']) == len(expected_uploaded_files)
for f in expected_uploaded_files:
assert f in parsed_result['uploaded-objects']
download_folder_base = os.path.join('tests', 'temp', 'verify_os_bulk_upload_inclusion_test')
verify_downloaded_folders_for_inclusion_exclusion_tests(
expected_uploaded_files=expected_uploaded_files,
source_folder=inclusion_test_folder,
download_folder=download_folder_base,
download_prefix_no_slash='inclusion_test'
)
# Download objects with inclusions to make sure that works
target_download_folder = os.path.join(download_folder_base, 'get_with_include')
invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--download-dir', target_download_folder,
'--prefix', 'inclusion_test/',
'--include', '*.txt',
'--include', 'subfolder/*.png',
'--include', 'subfolder/blah.pdf',
])
expected_uploaded_files.remove('{}{}'.format('inclusion_test/', 'subfolder/subfolder2/xyz.jpg')) # This is not in our --include switches
assert not os.path.exists(os.path.join(target_download_folder, 'inclusion_test', 'subfolder', 'subfolder2', 'xyz.jpg'))
for expected_file in expected_uploaded_files:
target_file = os.path.join(target_download_folder, expected_file)
original_file = target_file.replace(os.path.join(target_download_folder, 'inclusion_test'), inclusion_test_folder)
assert os.path.exists(target_file)
assert filecmp.cmp(original_file, target_file, shallow=False)
# Download a specific object with inclusions
invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--download-dir', target_download_folder,
'--prefix', 'inclusion_test/',
'--include', 'subfolder/subfolder2/xyz.jpg'
])
assert os.path.exists(os.path.join(target_download_folder, 'inclusion_test', 'subfolder', 'subfolder2', 'xyz.jpg'))
# Delete objects with inclusions
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'inclusion_test/',
'--include', '*.txt',
'--include', 'subfolder/blah.pdf',
'--dry-run'
])
parsed_dry_run_result = parse_json_response_from_mixed_output(result.output)
assert len(parsed_dry_run_result['deleted-objects']) == 4
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'inclusion_test/',
'--include', '*.txt',
'--include', 'subfolder/blah.pdf',
'--force'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['delete-failures'] == {}
assert set(parsed_result['deleted-objects']) == set(parsed_dry_run_result['deleted-objects'])
list_objects_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bulk_put_bucket_name,
prefix='inclusion_test/',
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
remaining_objects = []
for response in list_objects_responses:
remaining_objects.extend(map(lambda obj: obj.name, response.data.objects))
assert len(remaining_objects) == 3
assert '{}{}'.format('inclusion_test/', 'subfolder/testfile3.png') in remaining_objects
assert '{}{}'.format('inclusion_test/', 'subfolder/subfolder2/testfile4.png') in remaining_objects
assert '{}{}'.format('inclusion_test/', 'subfolder/subfolder2/xyz.jpg') in remaining_objects
shutil.rmtree(target_download_folder)
shutil.rmtree(inclusion_test_folder)
@util.skip_while_rerecording
def test_bulk_put_get_delete_with_exclusions(object_storage_client):
exclusion_test_folder = os.path.join('tests', 'temp', 'os_bulk_upload_exclusion_test')
if not os.path.exists(exclusion_test_folder):
os.makedirs(exclusion_test_folder)
# Make some files for include/exclude
folders_to_files = {
'': ['test_file1.txt', 'test_file2.png'],
'subfolder': ['blah.pdf', 'hello.txt', 'testfile3.png'],
'subfolder/subfolder2': ['xyz.jpg', 'blag.txt', 'byz.jpg', 'testfile4.png']
}
for folder, files in six.iteritems(folders_to_files):
folder_path = os.path.join(exclusion_test_folder, folder)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for file in files:
file_path = os.path.join(folder_path, file)
with open(file_path, 'w') as f:
                # For non-text extensions this won't create a valid file of that type, but for testing purposes it is OK
f.write(generate_random_string(CONTENT_STRING_LENGTH))
result = invoke([
'os',
'object',
'bulk-upload',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--src-dir', exclusion_test_folder,
'--object-prefix', 'exclusion_test/',
'--exclude', '*.txt',
'--exclude', '*.ps1', # Shouldn't match anything
'--exclude', 'subfolder/subfolder2/xyz.jpg',
'--exclude', 'subfolder/[spqr]lah.pdf' # blah.pdf should still be included because it's not slah.pdf, plah.pdf, qlah.pdf or rlah.pdf
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['skipped-objects'] == []
assert parsed_result['upload-failures'] == {}
expected_uploaded_files = [
'{}{}'.format('exclusion_test/', 'test_file2.png'),
'{}{}'.format('exclusion_test/', 'subfolder/blah.pdf'),
'{}{}'.format('exclusion_test/', 'subfolder/testfile3.png'),
'{}{}'.format('exclusion_test/', 'subfolder/subfolder2/byz.jpg'),
'{}{}'.format('exclusion_test/', 'subfolder/subfolder2/testfile4.png')
]
# Check that we uploaded what we said we did
assert len(parsed_result['uploaded-objects']) == len(expected_uploaded_files)
for f in expected_uploaded_files:
assert f in parsed_result['uploaded-objects']
download_folder_base = os.path.join('tests', 'temp', 'verify_os_bulk_upload_exclusion_test')
verify_downloaded_folders_for_inclusion_exclusion_tests(
expected_uploaded_files=expected_uploaded_files,
source_folder=exclusion_test_folder,
download_folder=download_folder_base,
download_prefix_no_slash='exclusion_test'
)
# Download objects with exclusions to make sure that works
target_download_folder = os.path.join(download_folder_base, 'get_with_exclude')
invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--download-dir', target_download_folder,
'--prefix', 'exclusion_test/',
'--exclude', '*.jpg',
'--exclude', 'subfolder/subfolder2/*.png',
'--exclude', 'subfolder/blah.pdf',
])
assert not os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'blah.pdf'))
assert not os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'subfolder2', 'byz.jpg'))
assert not os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'subfolder2', 'testfile4.png'))
assert get_count_of_files_in_folder_and_subfolders(target_download_folder) == 2
assert os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'test_file2.png'))
assert os.path.exists(os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'testfile3.png'))
assert filecmp.cmp(
os.path.join(exclusion_test_folder, 'test_file2.png'),
os.path.join(target_download_folder, 'exclusion_test', 'test_file2.png')
)
assert filecmp.cmp(
os.path.join(exclusion_test_folder, 'subfolder', 'testfile3.png'),
os.path.join(target_download_folder, 'exclusion_test', 'subfolder', 'testfile3.png')
)
# Delete objects with exclusions
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'exclusion_test/',
'--exclude', '*.jpg',
'--exclude', 'subfolder/blah.pdf',
'--dry-run'
])
parsed_dry_run_result = parse_json_response_from_mixed_output(result.output)
assert len(parsed_dry_run_result['deleted-objects']) == 3
result = invoke([
'os', 'object', 'bulk-delete',
'--namespace', util.NAMESPACE,
'--bucket-name', bulk_put_bucket_name,
'--prefix', 'exclusion_test/',
'--exclude', '*.jpg',
'--exclude', 'subfolder/blah.pdf',
'--force'
])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['delete-failures'] == {}
assert set(parsed_result['deleted-objects']) == set(parsed_dry_run_result['deleted-objects'])
list_objects_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bulk_put_bucket_name,
prefix='exclusion_test/',
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
remaining_objects = []
for response in list_objects_responses:
remaining_objects.extend(map(lambda obj: obj.name, response.data.objects))
assert len(remaining_objects) == 2
assert '{}{}'.format('exclusion_test/', 'subfolder/blah.pdf') in remaining_objects
assert '{}{}'.format('exclusion_test/', 'subfolder/subfolder2/byz.jpg') in remaining_objects
shutil.rmtree(target_download_folder)
shutil.rmtree(exclusion_test_folder)
@util.skip_while_rerecording
def test_delete_when_no_objects_in_bucket(vcr_fixture, object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkDelete_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name])
assert 'There are no objects to delete in {}'.format(create_bucket_request.name) in result.output
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
@util.skip_while_rerecording
def test_delete_dry_run(vcr_fixture):
# Dry-run against entire bucket
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--dry-run'])
parsed_result = json.loads(result.output)
assert set(parsed_result['deleted-objects']) == set(bulk_get_object_to_content.keys())
# Dry-run against a folder and all subfolders
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--prefix', 'a/b/', '--dry-run'])
parsed_result = json.loads(result.output)
expected_objects = set().union(bulk_get_prefix_to_object['a/b'], bulk_get_prefix_to_object['a/b/c'], bulk_get_prefix_to_object['a/b/c/d'])
assert set(parsed_result['deleted-objects']) == expected_objects
# Dry-run against a folder and no subfolders
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--prefix', 'a/b/', '--delimiter', '/', '--dry-run'])
parsed_result = json.loads(result.output)
assert set(parsed_result['deleted-objects']) == set(bulk_get_prefix_to_object['a/b'])
@util.skip_while_rerecording
def test_delete(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageBulkDelete_{}'.format(random.randint(0, 1000000))
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--src-dir', root_bulk_put_folder])
num_objects_to_delete = get_count_of_files_in_folder_and_subfolders(root_bulk_put_folder)
# Sanity check that the bucket has things in it
assert get_number_of_objects_in_bucket(object_storage_client, create_bucket_request.name) > 0
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name])
if num_objects_to_delete >= 1000:
confirm_prompt = 'WARNING: This command will delete at least {} objects. Are you sure you wish to continue?'.format(num_objects_to_delete)
else:
confirm_prompt = 'WARNING: This command will delete {} objects. Are you sure you wish to continue?'.format(num_objects_to_delete)
assert confirm_prompt in result.output
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--force'])
parsed_result = parse_json_response_from_mixed_output(result.output)
assert parsed_result['delete-failures'] == {}
assert len(parsed_result['deleted-objects']) == num_objects_to_delete
# Check that the bucket is now empty
assert get_number_of_objects_in_bucket(object_storage_client, create_bucket_request.name) == 0
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
@util.skip_while_rerecording
def test_bulk_operation_table_output_query(object_storage_client):
create_bucket_request = oci.object_storage.models.CreateBucketDetails()
create_bucket_request.name = 'ObjectStorageTableOutput_{}'.format(util.random_number_string())
create_bucket_request.compartment_id = util.COMPARTMENT_ID
util.clear_test_data(object_storage_client, util.NAMESPACE, util.COMPARTMENT_ID, create_bucket_request.name)
object_storage_client.create_bucket(util.NAMESPACE, create_bucket_request)
result = invoke(['os', 'object', 'bulk-upload', '--namespace', util.NAMESPACE, '--bucket-name', create_bucket_request.name, '--src-dir', root_bulk_put_folder, '--output', 'table', '--query', "[?action=='Uploaded'].{file: file, \"opc-content-md5\": \"opc-content-md5\"}"])
assert 'file' in result.output
assert 'opc-content-md5' in result.output
assert 'etag' not in result.output
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--dry-run', '--output', 'table'])
assert 'action' in result.output
assert 'object' in result.output
assert '/a/Object_1' in result.output
result = invoke(['os', 'object', 'bulk-delete', '--namespace', util.NAMESPACE, '--bucket-name', bulk_get_bucket_name, '--dry-run', '--output', 'table', '--query', "[?object=='Object_0'][object]"])
assert 'action' not in result.output
assert '/a/Object_1' not in result.output
assert 'Object_0' in result.output
target_download_folder = os.path.join('tests', 'temp', create_bucket_request.name)
result = invoke([
'os', 'object', 'bulk-download',
'--namespace', util.NAMESPACE,
'--bucket-name', create_bucket_request.name,
'--download-dir', target_download_folder,
'--output', 'table',
])
delete_bucket_and_all_items(object_storage_client, create_bucket_request.name)
shutil.rmtree(target_download_folder)
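# Helper that runs a CLI command through util.invoke_command, optionally prepending the --debug flag.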
def invoke(commands, debug=False, **args):
    if debug is True:
        commands = ['--debug'] + commands
    return util.invoke_command(commands, **args)
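# Walks a directory tree and returns the total number of files found in it and all of its subfolders.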
def get_count_of_files_in_folder_and_subfolders(directory):
file_count = 0
for dir_name, subdir_list, file_list in os.walk(directory):
file_count = file_count + len(file_list)
return file_count
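# Returns 'a' * length when running against recorded (VCR) mock responses so playback is deterministic;
# otherwise returns a string of random lowercase characters of the requested length.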
def generate_random_string(length):
if test_config_container.using_vcr_with_mock_responses():
return 'a' * length
else:
return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
# Pull JSON data out of output which may have stuff other than JSON in it. Assumes that nothing
# comes after the JSON data
def parse_json_response_from_mixed_output(output):
lines = output.split('\n')
json_str = ''
object_begun = False
for line in lines:
if object_begun or line.startswith('{'):
object_begun = True
json_str += line
return json.loads(json_str)
# For the bulk operations, object names are taken from the file path of the thing we uploaded. Normalize to
# / in the paths (Windows can go both ways) then chop the front bit off
def get_object_name_from_path(path_root, full_path):
return full_path.replace(os.sep, '/').replace(path_root + '/', '')
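# Pages through every object in the bucket via retrying_list_objects, deletes each object, then deletes the bucket itself.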
def delete_bucket_and_all_items(object_storage_client, bucket_name):
list_object_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bucket_name,
prefix=None,
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
for response in list_object_responses:
for obj in response.data.objects:
object_storage_client.delete_object(util.NAMESPACE, bucket_name, obj.name)
object_storage_client.delete_bucket(util.NAMESPACE, bucket_name)
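# Counts the objects in a bucket by paging through the list responses and summing the entries in each page.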
def get_number_of_objects_in_bucket(object_storage_client, bucket_name):
list_object_responses = oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects(
client=object_storage_client,
request_id=None,
namespace=util.NAMESPACE,
bucket_name=bucket_name,
prefix=None,
start=None,
end=None,
limit=1000,
delimiter=None,
fields='name',
retrieve_all=True
)
num_objects_in_bucket = 0
for response in list_object_responses:
num_objects_in_bucket = num_objects_in_bucket + len(response.data.objects)
return num_objects_in_bucket
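# Bulk-downloads the given prefix and checks that every expected uploaded file exists locally and matches its
# source file byte-for-byte (path separators are normalized so the comparison also works on Windows).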
def verify_downloaded_folders_for_inclusion_exclusion_tests(expected_uploaded_files, source_folder, download_folder, download_prefix_no_slash):
# Download uploaded files and check they are the same
invoke(['os', 'object', 'bulk-download', '--namespace', util.NAMESPACE, '--bucket-name', bulk_put_bucket_name, '--download-dir', download_folder, '--prefix', download_prefix_no_slash + '/'])
    # The strings in the expected_uploaded_files array use "/" as the separator, which doesn't match paths on Windows. Using normpath converts these to
    # use "\" on Windows so our matching/comparison works. For Linux/Unix/macOS this doesn't appear to have an impact
normalized_expected_uploaded_files = []
for euf in expected_uploaded_files:
normalized_expected_uploaded_files.append(os.path.normpath(euf))
actual_download_folder = os.path.join(download_folder, download_prefix_no_slash)
files_compared = 0
for dir_name, subdir_list, file_list in os.walk(source_folder):
for file in file_list:
source_file_path = os.path.join(dir_name, file)
downloaded_file_path = source_file_path.replace(source_folder, actual_download_folder)
if downloaded_file_path.replace(actual_download_folder, download_prefix_no_slash) in normalized_expected_uploaded_files:
files_compared += 1
assert os.path.exists(downloaded_file_path)
assert filecmp.cmp(source_file_path, downloaded_file_path, shallow=False)
assert files_compared == len(expected_uploaded_files)
shutil.rmtree(actual_download_folder)
| [((54, 1, 54, 45), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((59, 28, 59, 75), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ({}, {}), '()', False, 'import oci\n'), ((62, 4, 62, 112), 'tests.util.clear_test_data', 'util.clear_test_data', ({(62, 25, 62, 46): 'object_storage_client', (62, 48, 62, 62): 'util.NAMESPACE', (62, 64, 62, 83): 'util.COMPARTMENT_ID', (62, 85, 62, 111): 'create_bucket_request.name'}, {}), '(object_storage_client, util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)', False, 'from tests import util\n'), ((97, 28, 97, 75), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ({}, {}), '()', False, 'import oci\n'), ((100, 4, 100, 112), 'tests.util.clear_test_data', 'util.clear_test_data', ({(100, 25, 100, 46): 'object_storage_client', (100, 48, 100, 62): 'util.NAMESPACE', (100, 64, 100, 83): 'util.COMPARTMENT_ID', (100, 85, 100, 111): 'create_bucket_request.name'}, {}), '(object_storage_client, util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)', False, 'from tests import util\n'), ((133, 4, 133, 39), 'shutil.rmtree', 'shutil.rmtree', ({(133, 18, 133, 38): 'root_bulk_put_folder'}, {}), '(root_bulk_put_folder)', False, 'import shutil\n'), ((167, 4, 167, 34), 'shutil.rmtree', 'shutil.rmtree', ({(167, 18, 167, 33): 'download_folder'}, {}), '(download_folder)', False, 'import shutil\n'), ((197, 4, 197, 34), 'shutil.rmtree', 'shutil.rmtree', ({(197, 18, 197, 33): 'download_folder'}, {}), '(download_folder)', False, 'import shutil\n'), ((213, 4, 213, 34), 'shutil.rmtree', 'shutil.rmtree', ({(213, 18, 213, 33): 'download_folder'}, {}), '(download_folder)', False, 'import shutil\n'), ((241, 4, 241, 34), 'shutil.rmtree', 'shutil.rmtree', ({(241, 18, 241, 33): 'download_folder'}, {}), '(download_folder)', False, 'import shutil\n'), ((251, 4, 251, 34), 'shutil.rmtree', 'shutil.rmtree', ({(251, 18, 251, 33): 'download_folder'}, {}), '(download_folder)', False, 'import shutil\n'), ((256, 28, 256, 75), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ({}, {}), '()', False, 'import oci\n'), ((259, 4, 259, 112), 'tests.util.clear_test_data', 'util.clear_test_data', ({(259, 25, 259, 46): 'object_storage_client', (259, 48, 259, 62): 'util.NAMESPACE', (259, 64, 259, 83): 'util.COMPARTMENT_ID', (259, 85, 259, 111): 'create_bucket_request.name'}, {}), '(object_storage_client, util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)', False, 'from tests import util\n'), ((262, 26, 262, 84), 'os.path.join', 'os.path.join', ({(262, 39, 262, 46): '"""tests"""', (262, 48, 262, 54): '"""temp"""', (262, 56, 262, 83): '"""multipart_get_large_files"""'}, {}), "('tests', 'temp', 'multipart_get_large_files')", False, 'import os\n'), ((279, 28, 279, 93), 'os.path.join', 'os.path.join', ({(279, 41, 279, 48): '"""tests"""', (279, 50, 279, 56): '"""temp"""', (279, 58, 279, 92): '"""multipart_get_large_files_verify"""'}, {}), "('tests', 'temp', 'multipart_get_large_files_verify')", False, 'import os\n'), ((291, 4, 291, 38), 'shutil.rmtree', 'shutil.rmtree', ({(291, 18, 291, 37): 'large_file_root_dir'}, {}), '(large_file_root_dir)', False, 'import shutil\n'), ((292, 4, 292, 40), 'shutil.rmtree', 'shutil.rmtree', ({(292, 18, 292, 39): 'large_file_verify_dir'}, {}), '(large_file_verify_dir)', False, 'import shutil\n'), ((301, 20, 301, 45), 'json.loads', 'json.loads', ({(301, 31, 301, 44): 
'result.output'}, {}), '(result.output)', False, 'import json\n'), ((306, 20, 306, 45), 'json.loads', 'json.loads', ({(306, 31, 306, 44): 'result.output'}, {}), '(result.output)', False, 'import json\n'), ((311, 20, 311, 45), 'json.loads', 'json.loads', ({(311, 31, 311, 44): 'result.output'}, {}), '(result.output)', False, 'import json\n'), ((316, 20, 316, 45), 'json.loads', 'json.loads', ({(316, 31, 316, 44): 'result.output'}, {}), '(result.output)', False, 'import json\n'), ((336, 44, 336, 73), 'os.walk', 'os.walk', ({(336, 52, 336, 72): 'root_bulk_put_folder'}, {}), '(root_bulk_put_folder)', False, 'import os\n'), ((373, 4, 373, 34), 'shutil.rmtree', 'shutil.rmtree', ({(373, 18, 373, 33): 'download_folder'}, {}), '(download_folder)', False, 'import shutil\n'), ((391, 44, 391, 73), 'os.walk', 'os.walk', ({(391, 52, 391, 72): 'root_bulk_put_folder'}, {}), '(root_bulk_put_folder)', False, 'import os\n'), ((404, 4, 404, 34), 'shutil.rmtree', 'shutil.rmtree', ({(404, 18, 404, 33): 'download_folder'}, {}), '(download_folder)', False, 'import shutil\n'), ((413, 28, 413, 75), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ({}, {}), '()', False, 'import oci\n'), ((416, 4, 416, 112), 'tests.util.clear_test_data', 'util.clear_test_data', ({(416, 25, 416, 46): 'object_storage_client', (416, 48, 416, 62): 'util.NAMESPACE', (416, 64, 416, 83): 'util.COMPARTMENT_ID', (416, 85, 416, 111): 'create_bucket_request.name'}, {}), '(object_storage_client, util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)', False, 'from tests import util\n'), ((460, 29, 460, 82), 'os.path.join', 'os.path.join', ({(460, 42, 460, 57): 'download_folder', (460, 59, 460, 81): '"""bulk_put_prefix_test"""'}, {}), "(download_folder, 'bulk_put_prefix_test')", False, 'import os\n'), ((461, 44, 461, 73), 'os.walk', 'os.walk', ({(461, 52, 461, 72): 'root_bulk_put_folder'}, {}), '(root_bulk_put_folder)', False, 'import os\n'), ((472, 4, 472, 34), 'shutil.rmtree', 'shutil.rmtree', ({(472, 18, 472, 33): 'download_folder'}, {}), '(download_folder)', False, 'import shutil\n'), ((486, 28, 486, 90), 'os.path.join', 'os.path.join', ({(486, 41, 486, 48): '"""tests"""', (486, 50, 486, 56): '"""temp"""', (486, 58, 486, 89): '"""os_bulk_upload_inclusion_test"""'}, {}), "('tests', 'temp', 'os_bulk_upload_inclusion_test')", False, 'import os\n'), ((496, 25, 496, 56), 'six.iteritems', 'six.iteritems', ({(496, 39, 496, 55): 'folders_to_files'}, {}), '(folders_to_files)', False, 'import six\n'), ((539, 27, 539, 96), 'os.path.join', 'os.path.join', ({(539, 40, 539, 47): '"""tests"""', (539, 49, 539, 55): '"""temp"""', (539, 57, 539, 95): '"""verify_os_bulk_upload_inclusion_test"""'}, {}), "('tests', 'temp', 'verify_os_bulk_upload_inclusion_test')", False, 'import os\n'), ((548, 29, 548, 83), 'os.path.join', 'os.path.join', ({(548, 42, 548, 62): 'download_folder_base', (548, 64, 548, 82): '"""get_with_include"""'}, {}), "(download_folder_base, 'get_with_include')", False, 'import os\n'), ((605, 29, 617, 5), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', 'oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', (), '', True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((626, 4, 626, 41), 'shutil.rmtree', 'shutil.rmtree', ({(626, 18, 626, 40): 'target_download_folder'}, {}), '(target_download_folder)', False, 'import shutil\n'), ((627, 4, 627, 40), 'shutil.rmtree', 
'shutil.rmtree', ({(627, 18, 627, 39): 'inclusion_test_folder'}, {}), '(inclusion_test_folder)', False, 'import shutil\n'), ((632, 28, 632, 90), 'os.path.join', 'os.path.join', ({(632, 41, 632, 48): '"""tests"""', (632, 50, 632, 56): '"""temp"""', (632, 58, 632, 89): '"""os_bulk_upload_exclusion_test"""'}, {}), "('tests', 'temp', 'os_bulk_upload_exclusion_test')", False, 'import os\n'), ((642, 25, 642, 56), 'six.iteritems', 'six.iteritems', ({(642, 39, 642, 55): 'folders_to_files'}, {}), '(folders_to_files)', False, 'import six\n'), ((683, 27, 683, 96), 'os.path.join', 'os.path.join', ({(683, 40, 683, 47): '"""tests"""', (683, 49, 683, 55): '"""temp"""', (683, 57, 683, 95): '"""verify_os_bulk_upload_exclusion_test"""'}, {}), "('tests', 'temp', 'verify_os_bulk_upload_exclusion_test')", False, 'import os\n'), ((692, 29, 692, 83), 'os.path.join', 'os.path.join', ({(692, 42, 692, 62): 'download_folder_base', (692, 64, 692, 82): '"""get_with_exclude"""'}, {}), "(download_folder_base, 'get_with_exclude')", False, 'import os\n'), ((747, 29, 759, 5), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', 'oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', (), '', True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((767, 4, 767, 41), 'shutil.rmtree', 'shutil.rmtree', ({(767, 18, 767, 40): 'target_download_folder'}, {}), '(target_download_folder)', False, 'import shutil\n'), ((768, 4, 768, 40), 'shutil.rmtree', 'shutil.rmtree', ({(768, 18, 768, 39): 'exclusion_test_folder'}, {}), '(exclusion_test_folder)', False, 'import shutil\n'), ((773, 28, 773, 75), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ({}, {}), '()', False, 'import oci\n'), ((788, 20, 788, 45), 'json.loads', 'json.loads', ({(788, 31, 788, 44): 'result.output'}, {}), '(result.output)', False, 'import json\n'), ((793, 20, 793, 45), 'json.loads', 'json.loads', ({(793, 31, 793, 44): 'result.output'}, {}), '(result.output)', False, 'import json\n'), ((799, 20, 799, 45), 'json.loads', 'json.loads', ({(799, 31, 799, 44): 'result.output'}, {}), '(result.output)', False, 'import json\n'), ((805, 28, 805, 75), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ({}, {}), '()', False, 'import oci\n'), ((808, 4, 808, 112), 'tests.util.clear_test_data', 'util.clear_test_data', ({(808, 25, 808, 46): 'object_storage_client', (808, 48, 808, 62): 'util.NAMESPACE', (808, 64, 808, 83): 'util.COMPARTMENT_ID', (808, 85, 808, 111): 'create_bucket_request.name'}, {}), '(object_storage_client, util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)', False, 'from tests import util\n'), ((837, 28, 837, 75), 'oci.object_storage.models.CreateBucketDetails', 'oci.object_storage.models.CreateBucketDetails', ({}, {}), '()', False, 'import oci\n'), ((840, 4, 840, 112), 'tests.util.clear_test_data', 'util.clear_test_data', ({(840, 25, 840, 46): 'object_storage_client', (840, 48, 840, 62): 'util.NAMESPACE', (840, 64, 840, 83): 'util.COMPARTMENT_ID', (840, 85, 840, 111): 'create_bucket_request.name'}, {}), '(object_storage_client, util.NAMESPACE, util.\n COMPARTMENT_ID, create_bucket_request.name)', False, 'from tests import util\n'), ((858, 29, 858, 86), 'os.path.join', 'os.path.join', ({(858, 42, 858, 49): '"""tests"""', (858, 51, 858, 57): '"""temp"""', (858, 59, 858, 85): 'create_bucket_request.name'}, {}), "('tests', 'temp', 
create_bucket_request.name)", False, 'import os\n'), ((869, 4, 869, 41), 'shutil.rmtree', 'shutil.rmtree', ({(869, 18, 869, 40): 'target_download_folder'}, {}), '(target_download_folder)', False, 'import shutil\n'), ((875, 11, 875, 49), 'tests.util.invoke_command', 'util.invoke_command', ({(875, 31, 875, 39): 'commands'}, {}), '(commands, **args)', False, 'from tests import util\n'), ((880, 44, 880, 62), 'os.walk', 'os.walk', ({(880, 52, 880, 61): 'directory'}, {}), '(directory)', False, 'import os\n'), ((887, 7, 887, 60), 'tests.test_config_container.using_vcr_with_mock_responses', 'test_config_container.using_vcr_with_mock_responses', ({}, {}), '()', False, 'from tests import test_config_container\n'), ((904, 11, 904, 31), 'json.loads', 'json.loads', ({(904, 22, 904, 30): 'json_str'}, {}), '(json_str)', False, 'import json\n'), ((914, 28, 926, 5), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', 'oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', (), '', True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((934, 28, 946, 5), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', 'oci_cli_object_storage.objectstorage_cli_extended.retrying_list_objects', (), '', True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((964, 29, 964, 84), 'os.path.join', 'os.path.join', ({(964, 42, 964, 57): 'download_folder', (964, 59, 964, 83): 'download_prefix_no_slash'}, {}), '(download_folder, download_prefix_no_slash)', False, 'import os\n'), ((966, 44, 966, 66), 'os.walk', 'os.walk', ({(966, 52, 966, 65): 'source_folder'}, {}), '(source_folder)', False, 'import os\n'), ((977, 4, 977, 41), 'shutil.rmtree', 'shutil.rmtree', ({(977, 18, 977, 40): 'actual_download_folder'}, {}), '(actual_download_folder)', False, 'import shutil\n'), ((60, 70, 60, 97), 'tests.util.random_number_string', 'util.random_number_string', ({}, {}), '()', False, 'from tests import util\n'), ((92, 59, 92, 86), 'tests.util.random_number_string', 'util.random_number_string', ({}, {}), '()', False, 'from tests import util\n'), ((94, 11, 94, 47), 'os.path.exists', 'os.path.exists', ({(94, 26, 94, 46): 'bulk_put_folder_leaf'}, {}), '(bulk_put_folder_leaf)', False, 'import os\n'), ((95, 8, 95, 41), 'os.makedirs', 'os.makedirs', ({(95, 20, 95, 40): 'bulk_put_folder_leaf'}, {}), '(bulk_put_folder_leaf)', False, 'import os\n'), ((98, 70, 98, 97), 'tests.util.random_number_string', 'util.random_number_string', ({}, {}), '()', False, 'from tests import util\n'), ((138, 32, 138, 146), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', ({(138, 128, 138, 145): '"""/this/is/a/path"""'}, {}), "(\n '/this/is/a/path')", True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((139, 32, 139, 151), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', ({(139, 128, 139, 145): '"""/this/is/a/path"""', (139, 147, 139, 150): '"""/"""'}, {}), "(\n '/this/is/a/path', '/')", True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), 
((140, 32, 140, 156), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', ({(140, 128, 140, 149): '"""\\\\this\\\\is\\\\a\\\\path"""', (140, 151, 140, 155): '"""\\\\"""'}, {}), "(\n '\\\\this\\\\is\\\\a\\\\path', '\\\\')", True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((141, 32, 141, 154), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', ({(141, 128, 141, 147): '"""\\\\this/is/a\\\\path"""', (141, 149, 141, 153): '"""\\\\"""'}, {}), "(\n '\\\\this/is/a\\\\path', '\\\\')", True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((143, 28, 143, 138), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', ({(143, 124, 143, 137): '"""thisisapath"""'}, {}), "(\n 'thisisapath')", True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((144, 28, 144, 143), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', ({(144, 124, 144, 137): '"""thisisapath"""', (144, 139, 144, 142): '"""/"""'}, {}), "(\n 'thisisapath', '/')", True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((145, 28, 145, 144), 'services.object_storage.src.oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', 'oci_cli_object_storage.objectstorage_cli_extended.normalize_object_name_path_for_object_storage', ({(145, 124, 145, 137): '"""thisisapath"""', (145, 139, 145, 143): '"""\\\\"""'}, {}), "(\n 'thisisapath', '\\\\')", True, 'import services.object_storage.src.oci_cli_object_storage as oci_cli_object_storage\n'), ((178, 20, 178, 62), 'os.path.join', 'os.path.join', ({(178, 33, 178, 48): 'download_folder', (178, 50, 178, 61): 'object_name'}, {}), '(download_folder, object_name)', False, 'import os\n'), ((184, 20, 184, 62), 'os.path.join', 'os.path.join', ({(184, 33, 184, 48): 'download_folder', (184, 50, 184, 61): 'object_name'}, {}), '(download_folder, object_name)', False, 'import os\n'), ((190, 20, 190, 62), 'os.path.join', 'os.path.join', ({(190, 33, 190, 48): 'download_folder', (190, 50, 190, 61): 'object_name'}, {}), '(download_folder, object_name)', False, 'import os\n'), ((206, 20, 206, 62), 'os.path.join', 'os.path.join', ({(206, 33, 206, 48): 'download_folder', (206, 50, 206, 61): 'object_name'}, {}), '(download_folder, object_name)', False, 'import os\n'), ((257, 80, 257, 107), 'tests.util.random_number_string', 'util.random_number_string', ({}, {}), '()', False, 'from tests import util\n'), ((263, 11, 263, 46), 'os.path.exists', 'os.path.exists', ({(263, 26, 263, 45): 'large_file_root_dir'}, {}), '(large_file_root_dir)', False, 'import os\n'), ((264, 8, 264, 40), 'os.makedirs', 'os.makedirs', ({(264, 20, 264, 39): 'large_file_root_dir'}, {}), '(large_file_root_dir)', False, 'import os\n'), ((265, 27, 265, 69), 'os.path.join', 
'os.path.join', ({(265, 40, 265, 59): 'large_file_root_dir', (265, 61, 265, 68): '"""1.bin"""'}, {}), "(large_file_root_dir, '1.bin')", False, 'import os\n'), ((266, 27, 266, 69), 'os.path.join', 'os.path.join', ({(266, 40, 266, 59): 'large_file_root_dir', (266, 61, 266, 68): '"""2.bin"""'}, {}), "(large_file_root_dir, '2.bin')", False, 'import os\n'), ((267, 27, 267, 69), 'os.path.join', 'os.path.join', ({(267, 40, 267, 59): 'large_file_root_dir', (267, 61, 267, 68): '"""3.bin"""'}, {}), "(large_file_root_dir, '3.bin')", False, 'import os\n'), ((268, 27, 268, 69), 'os.path.join', 'os.path.join', ({(268, 40, 268, 59): 'large_file_root_dir', (268, 61, 268, 68): '"""4.bin"""'}, {}), "(large_file_root_dir, '4.bin')", False, 'import os\n'), ((269, 27, 269, 69), 'os.path.join', 'os.path.join', ({(269, 40, 269, 59): 'large_file_root_dir', (269, 61, 269, 68): '"""5.bin"""'}, {}), "(large_file_root_dir, '5.bin')", False, 'import os\n'), ((270, 27, 270, 69), 'os.path.join', 'os.path.join', ({(270, 40, 270, 59): 'large_file_root_dir', (270, 61, 270, 68): '"""6.bin"""'}, {}), "(large_file_root_dir, '6.bin')", False, 'import os\n'), ((284, 23, 284, 65), 'os.path.join', 'os.path.join', ({(284, 36, 284, 55): 'large_file_root_dir', (284, 57, 284, 64): '"""1.bin"""'}, {}), "(large_file_root_dir, '1.bin')", False, 'import os\n'), ((284, 67, 284, 111), 'os.path.join', 'os.path.join', ({(284, 80, 284, 101): 'large_file_verify_dir', (284, 103, 284, 110): '"""1.bin"""'}, {}), "(large_file_verify_dir, '1.bin')", False, 'import os\n'), ((285, 23, 285, 65), 'os.path.join', 'os.path.join', ({(285, 36, 285, 55): 'large_file_root_dir', (285, 57, 285, 64): '"""2.bin"""'}, {}), "(large_file_root_dir, '2.bin')", False, 'import os\n'), ((285, 67, 285, 111), 'os.path.join', 'os.path.join', ({(285, 80, 285, 101): 'large_file_verify_dir', (285, 103, 285, 110): '"""2.bin"""'}, {}), "(large_file_verify_dir, '2.bin')", False, 'import os\n'), ((286, 23, 286, 65), 'os.path.join', 'os.path.join', ({(286, 36, 286, 55): 'large_file_root_dir', (286, 57, 286, 64): '"""3.bin"""'}, {}), "(large_file_root_dir, '3.bin')", False, 'import os\n'), ((286, 67, 286, 111), 'os.path.join', 'os.path.join', ({(286, 80, 286, 101): 'large_file_verify_dir', (286, 103, 286, 110): '"""3.bin"""'}, {}), "(large_file_verify_dir, '3.bin')", False, 'import os\n'), ((287, 23, 287, 65), 'os.path.join', 'os.path.join', ({(287, 36, 287, 55): 'large_file_root_dir', (287, 57, 287, 64): '"""4.bin"""'}, {}), "(large_file_root_dir, '4.bin')", False, 'import os\n'), ((287, 67, 287, 111), 'os.path.join', 'os.path.join', ({(287, 80, 287, 101): 'large_file_verify_dir', (287, 103, 287, 110): '"""4.bin"""'}, {}), "(large_file_verify_dir, '4.bin')", False, 'import os\n'), ((288, 23, 288, 65), 'os.path.join', 'os.path.join', ({(288, 36, 288, 55): 'large_file_root_dir', (288, 57, 288, 64): '"""5.bin"""'}, {}), "(large_file_root_dir, '5.bin')", False, 'import os\n'), ((288, 67, 288, 111), 'os.path.join', 'os.path.join', ({(288, 80, 288, 101): 'large_file_verify_dir', (288, 103, 288, 110): '"""5.bin"""'}, {}), "(large_file_verify_dir, '5.bin')", False, 'import os\n'), ((289, 23, 289, 65), 'os.path.join', 'os.path.join', ({(289, 36, 289, 55): 'large_file_root_dir', (289, 57, 289, 64): '"""6.bin"""'}, {}), "(large_file_root_dir, '6.bin')", False, 'import os\n'), ((289, 67, 289, 111), 'os.path.join', 'os.path.join', ({(289, 80, 289, 101): 'large_file_verify_dir', (289, 103, 289, 110): '"""6.bin"""'}, {}), "(large_file_verify_dir, '6.bin')", False, 'import os\n'), ((414, 80, 414, 
107), 'tests.util.random_number_string', 'util.random_number_string', ({}, {}), '()', False, 'from tests import util\n'), ((487, 11, 487, 48), 'os.path.exists', 'os.path.exists', ({(487, 26, 487, 47): 'inclusion_test_folder'}, {}), '(inclusion_test_folder)', False, 'import os\n'), ((488, 8, 488, 42), 'os.makedirs', 'os.makedirs', ({(488, 20, 488, 41): 'inclusion_test_folder'}, {}), '(inclusion_test_folder)', False, 'import os\n'), ((497, 22, 497, 65), 'os.path.join', 'os.path.join', ({(497, 35, 497, 56): 'inclusion_test_folder', (497, 58, 497, 64): 'folder'}, {}), '(inclusion_test_folder, folder)', False, 'import os\n'), ((562, 22, 562, 73), 'os.path.join', 'os.path.join', ({(562, 35, 562, 57): 'target_download_folder', (562, 59, 562, 72): 'expected_file'}, {}), '(target_download_folder, expected_file)', False, 'import os\n'), ((565, 15, 565, 42), 'os.path.exists', 'os.path.exists', ({(565, 30, 565, 41): 'target_file'}, {}), '(target_file)', False, 'import os\n'), ((566, 15, 566, 69), 'filecmp.cmp', 'filecmp.cmp', (), '', False, 'import filecmp\n'), ((577, 26, 577, 118), 'os.path.join', 'os.path.join', ({(577, 39, 577, 61): 'target_download_folder', (577, 63, 577, 79): '"""inclusion_test"""', (577, 81, 577, 92): '"""subfolder"""', (577, 94, 577, 106): '"""subfolder2"""', (577, 108, 577, 117): '"""xyz.jpg"""'}, {}), "(target_download_folder, 'inclusion_test', 'subfolder',\n 'subfolder2', 'xyz.jpg')", False, 'import os\n'), ((633, 11, 633, 48), 'os.path.exists', 'os.path.exists', ({(633, 26, 633, 47): 'exclusion_test_folder'}, {}), '(exclusion_test_folder)', False, 'import os\n'), ((634, 8, 634, 42), 'os.makedirs', 'os.makedirs', ({(634, 20, 634, 41): 'exclusion_test_folder'}, {}), '(exclusion_test_folder)', False, 'import os\n'), ((643, 22, 643, 65), 'os.path.join', 'os.path.join', ({(643, 35, 643, 56): 'exclusion_test_folder', (643, 58, 643, 64): 'folder'}, {}), '(exclusion_test_folder, folder)', False, 'import os\n'), ((709, 26, 709, 98), 'os.path.join', 'os.path.join', ({(709, 39, 709, 61): 'target_download_folder', (709, 63, 709, 79): '"""exclusion_test"""', (709, 81, 709, 97): '"""test_file2.png"""'}, {}), "(target_download_folder, 'exclusion_test', 'test_file2.png')", False, 'import os\n'), ((710, 26, 710, 110), 'os.path.join', 'os.path.join', ({(710, 39, 710, 61): 'target_download_folder', (710, 63, 710, 79): '"""exclusion_test"""', (710, 81, 710, 92): '"""subfolder"""', (710, 94, 710, 109): '"""testfile3.png"""'}, {}), "(target_download_folder, 'exclusion_test', 'subfolder',\n 'testfile3.png')", False, 'import os\n'), ((713, 8, 713, 61), 'os.path.join', 'os.path.join', ({(713, 21, 713, 42): 'exclusion_test_folder', (713, 44, 713, 60): '"""test_file2.png"""'}, {}), "(exclusion_test_folder, 'test_file2.png')", False, 'import os\n'), ((714, 8, 714, 80), 'os.path.join', 'os.path.join', ({(714, 21, 714, 43): 'target_download_folder', (714, 45, 714, 61): '"""exclusion_test"""', (714, 63, 714, 79): '"""test_file2.png"""'}, {}), "(target_download_folder, 'exclusion_test', 'test_file2.png')", False, 'import os\n'), ((717, 8, 717, 73), 'os.path.join', 'os.path.join', ({(717, 21, 717, 42): 'exclusion_test_folder', (717, 44, 717, 55): '"""subfolder"""', (717, 57, 717, 72): '"""testfile3.png"""'}, {}), "(exclusion_test_folder, 'subfolder', 'testfile3.png')", False, 'import os\n'), ((718, 8, 718, 92), 'os.path.join', 'os.path.join', ({(718, 21, 718, 43): 'target_download_folder', (718, 45, 718, 61): '"""exclusion_test"""', (718, 63, 718, 74): '"""subfolder"""', (718, 76, 718, 91): 
'"""testfile3.png"""'}, {}), "(target_download_folder, 'exclusion_test', 'subfolder',\n 'testfile3.png')", False, 'import os\n'), ((774, 69, 774, 96), 'tests.util.random_number_string', 'util.random_number_string', ({}, {}), '()', False, 'from tests import util\n'), ((806, 69, 806, 95), 'random.randint', 'random.randint', ({(806, 84, 806, 85): '0', (806, 87, 806, 94): '1000000'}, {}), '(0, 1000000)', False, 'import random\n'), ((838, 70, 838, 97), 'tests.util.random_number_string', 'util.random_number_string', ({}, {}), '()', False, 'from tests import util\n'), ((110, 26, 110, 71), 'os.path.join', 'os.path.join', ({(110, 39, 110, 59): 'root_bulk_put_folder', (110, 61, 110, 70): 'subfolder'}, {}), '(root_bulk_put_folder, subfolder)', False, 'import os\n'), ((157, 24, 157, 70), 'os.path.join', 'os.path.join', ({(157, 37, 157, 52): 'download_folder', (157, 54, 157, 69): 'object_name[1:]'}, {}), '(download_folder, object_name[1:])', False, 'import os\n'), ((159, 24, 159, 66), 'os.path.join', 'os.path.join', ({(159, 37, 159, 52): 'download_folder', (159, 54, 159, 65): 'object_name'}, {}), '(download_folder, object_name)', False, 'import os\n'), ((338, 31, 338, 59), 'os.path.join', 'os.path.join', ({(338, 44, 338, 52): 'dir_name', (338, 54, 338, 58): 'file'}, {}), '(dir_name, file)', False, 'import os\n'), ((341, 19, 341, 55), 'os.path.exists', 'os.path.exists', ({(341, 34, 341, 54): 'downloaded_file_path'}, {}), '(downloaded_file_path)', False, 'import os\n'), ((342, 19, 342, 85), 'filecmp.cmp', 'filecmp.cmp', (), '', False, 'import filecmp\n'), ((393, 31, 393, 59), 'os.path.join', 'os.path.join', ({(393, 44, 393, 52): 'dir_name', (393, 54, 393, 58): 'file'}, {}), '(dir_name, file)', False, 'import os\n'), ((396, 19, 396, 55), 'os.path.exists', 'os.path.exists', ({(396, 34, 396, 54): 'downloaded_file_path'}, {}), '(downloaded_file_path)', False, 'import os\n'), ((397, 19, 397, 85), 'filecmp.cmp', 'filecmp.cmp', (), '', False, 'import filecmp\n'), ((463, 31, 463, 59), 'os.path.join', 'os.path.join', ({(463, 44, 463, 52): 'dir_name', (463, 54, 463, 58): 'file'}, {}), '(dir_name, file)', False, 'import os\n'), ((466, 19, 466, 55), 'os.path.exists', 'os.path.exists', ({(466, 34, 466, 54): 'downloaded_file_path'}, {}), '(downloaded_file_path)', False, 'import os\n'), ((467, 19, 467, 85), 'filecmp.cmp', 'filecmp.cmp', (), '', False, 'import filecmp\n'), ((498, 15, 498, 42), 'os.path.exists', 'os.path.exists', ({(498, 30, 498, 41): 'folder_path'}, {}), '(folder_path)', False, 'import os\n'), ((499, 12, 499, 36), 'os.makedirs', 'os.makedirs', ({(499, 24, 499, 35): 'folder_path'}, {}), '(folder_path)', False, 'import os\n'), ((502, 24, 502, 55), 'os.path.join', 'os.path.join', ({(502, 37, 502, 48): 'folder_path', (502, 50, 502, 54): 'file'}, {}), '(folder_path, file)', False, 'import os\n'), ((560, 30, 560, 122), 'os.path.join', 'os.path.join', ({(560, 43, 560, 65): 'target_download_folder', (560, 67, 560, 83): '"""inclusion_test"""', (560, 85, 560, 96): '"""subfolder"""', (560, 98, 560, 110): '"""subfolder2"""', (560, 112, 560, 121): '"""xyz.jpg"""'}, {}), "(target_download_folder, 'inclusion_test', 'subfolder',\n 'subfolder2', 'xyz.jpg')", False, 'import os\n'), ((563, 44, 563, 98), 'os.path.join', 'os.path.join', ({(563, 57, 563, 79): 'target_download_folder', (563, 81, 563, 97): '"""inclusion_test"""'}, {}), "(target_download_folder, 'inclusion_test')", False, 'import os\n'), ((644, 15, 644, 42), 'os.path.exists', 'os.path.exists', ({(644, 30, 644, 41): 'folder_path'}, {}), '(folder_path)', False, 
'import os\n'), ((645, 12, 645, 36), 'os.makedirs', 'os.makedirs', ({(645, 24, 645, 35): 'folder_path'}, {}), '(folder_path)', False, 'import os\n'), ((648, 24, 648, 55), 'os.path.join', 'os.path.join', ({(648, 37, 648, 48): 'folder_path', (648, 50, 648, 54): 'file'}, {}), '(folder_path, file)', False, 'import os\n'), ((704, 30, 704, 109), 'os.path.join', 'os.path.join', ({(704, 43, 704, 65): 'target_download_folder', (704, 67, 704, 83): '"""exclusion_test"""', (704, 85, 704, 96): '"""subfolder"""', (704, 98, 704, 108): '"""blah.pdf"""'}, {}), "(target_download_folder, 'exclusion_test', 'subfolder', 'blah.pdf')", False, 'import os\n'), ((705, 30, 705, 122), 'os.path.join', 'os.path.join', ({(705, 43, 705, 65): 'target_download_folder', (705, 67, 705, 83): '"""exclusion_test"""', (705, 85, 705, 96): '"""subfolder"""', (705, 98, 705, 110): '"""subfolder2"""', (705, 112, 705, 121): '"""byz.jpg"""'}, {}), "(target_download_folder, 'exclusion_test', 'subfolder',\n 'subfolder2', 'byz.jpg')", False, 'import os\n'), ((706, 30, 706, 128), 'os.path.join', 'os.path.join', ({(706, 43, 706, 65): 'target_download_folder', (706, 67, 706, 83): '"""exclusion_test"""', (706, 85, 706, 96): '"""subfolder"""', (706, 98, 706, 110): '"""subfolder2"""', (706, 112, 706, 127): '"""testfile4.png"""'}, {}), "(target_download_folder, 'exclusion_test', 'subfolder',\n 'subfolder2', 'testfile4.png')", False, 'import os\n'), ((962, 50, 962, 71), 'os.path.normpath', 'os.path.normpath', ({(962, 67, 962, 70): 'euf'}, {}), '(euf)', False, 'import os\n'), ((968, 31, 968, 59), 'os.path.join', 'os.path.join', ({(968, 44, 968, 52): 'dir_name', (968, 54, 968, 58): 'file'}, {}), '(dir_name, file)', False, 'import os\n'), ((45, 9, 45, 105), 'tests.test_config_container.create_vcr', 'test_config_container.create_vcr', (), '', False, 'from tests import test_config_container\n'), ((116, 16, 116, 87), 'tests.util.create_large_file', 'util.create_large_file', ({(116, 39, 116, 48): 'file_path', (116, 50, 116, 86): 'LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES'}, {}), '(file_path, LARGE_CONTENT_FILE_SIZE_IN_MEBIBYTES)', False, 'from tests import util\n'), ((398, 19, 398, 47), 'mimetypes.guess_type', 'guess_type', ({(398, 30, 398, 46): 'source_file_path'}, {}), '(source_file_path)', False, 'from mimetypes import guess_type\n'), ((398, 51, 398, 83), 'mimetypes.guess_type', 'guess_type', ({(398, 62, 398, 82): 'downloaded_file_path'}, {}), '(downloaded_file_path)', False, 'from mimetypes import guess_type\n'), ((890, 23, 890, 60), 'random.choice', 'random.choice', ({(890, 37, 890, 59): 'string.ascii_lowercase'}, {}), '(string.ascii_lowercase)', False, 'import random\n'), ((973, 23, 973, 59), 'os.path.exists', 'os.path.exists', ({(973, 38, 973, 58): 'downloaded_file_path'}, {}), '(downloaded_file_path)', False, 'import os\n'), ((974, 23, 974, 89), 'filecmp.cmp', 'filecmp.cmp', (), '', False, 'import filecmp\n'), ((120, 16, 120, 78), 'tests.util.create_large_file', 'util.create_large_file', ({(120, 39, 120, 48): 'file_path', (120, 50, 120, 77): 'MID_SIZED_FILE_IN_MEBIBTYES'}, {}), '(file_path, MID_SIZED_FILE_IN_MEBIBTYES)', False, 'from tests import util\n')] |
yunhaom94/redis-writeanywhere | Extras/benchmark/simple-benchmark.py | 1fefed820811fb89585b2b153d916c3b0fa507a6 | #!/usr/bin/python3
import random
import string
import time
import subprocess
import os
import redis
import threading
def generate_string(string_size, size, dict):
'''
https://stackoverflow.com/questions/16308989/fastest-method-to-generate-big-random-string-with-lower-latin-letters
'''
for i in range(size):
min_lc = ord(b'a')
len_lc = 26
key = bytearray(random.getrandbits(8*string_size).to_bytes(string_size, 'big'))
for i, b in enumerate(key):
key[i] = min_lc + b % len_lc # convert 0..255 to 97..122
key = key.decode()
val = key
dict[key] = val
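# Benchmark flow: four threads each generate a quarter of the random key/value pairs (100,000-character
# lowercase strings), then we time 50% SET / 50% GET operations against Redis on localhost:7000,
# calling WAIT for 3 replicas after each phase before computing throughput and latency.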
if __name__ == "__main__":
    size = 1000  # TODO: make this a command-line argument
port = 7000
FNULL = open(os.devnull, 'w')
string_size = 100000
partition = int(size/4)
print("generating test sets")
d1 = {}
d2 = {}
d3 = {}
d4 = {}
t1 = threading.Thread(target=generate_string, args = (string_size, partition, d1))
t2 = threading.Thread(target=generate_string, args = (string_size, partition, d2))
t3 = threading.Thread(target=generate_string, args = (string_size, partition, d3))
t4 = threading.Thread(target=generate_string, args = (string_size, partition, d4))
t1.start()
t2.start()
t3.start()
t4.start()
t1.join()
    t2.join()
    t3.join()
    t4.join()
test_set = {}
test_set.update(d1)
test_set.update(d2)
test_set.update(d3)
test_set.update(d4)
print(len(test_set))
print("running tests...")
r = redis.StrictRedis(host='localhost', port=port, db=0)
start = time.time()
print("testing set")
for k,v in test_set.items():
r.set(k, v)
r.wait(3, 0)
print("testing get")
for k,v in test_set.items():
r.get(k)
r.wait(3, 0)
end = time.time()
runtime = end - start
ops = size * 2
throughput = float(ops/runtime)
latency = float(1/throughput)
print("total run time: {runtime}s \n\
number of total operations with 50% Set and 50% Get: {ops} \n\
avg. throughput: {throughput} ops/s \n\
avg. latency: {latency} s".format(
runtime=runtime,
ops=ops,
throughput=throughput,
latency=latency
))
| [((50, 9, 50, 86), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((51, 9, 51, 86), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((52, 9, 52, 86), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((53, 9, 53, 86), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((74, 8, 74, 60), 'redis.StrictRedis', 'redis.StrictRedis', (), '', False, 'import redis\n'), ((76, 12, 76, 23), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((88, 10, 88, 21), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((22, 24, 22, 57), 'random.getrandbits', 'random.getrandbits', ({(22, 43, 22, 56): '8 * string_size'}, {}), '(8 * string_size)', False, 'import random\n')] |
Widdershin/CodeEval | challenges/015-setintersection.py | c1c769363763d6f7e1ac5bf3707de2731c3bd926 | """
https://www.codeeval.com/browse/30/
Set Intersection
Challenge Description:
You are given two sorted list of numbers (ascending order). The lists
themselves are comma delimited and the two lists are semicolon
delimited. Print out the intersection of these two sets.
Input Sample:
File containing two lists of ascending order sorted integers, comma
delimited, one per line. E.g.
1,2,3,4;4,5,6
20,21,22;45,46,47
7,8,9;8,9,10,11,12
Output Sample:
Print out the ascending order sorted intersection of the two lists,
one per line. Print empty new line in case the lists have
no intersection. E.g.
4
8,9
"""
###### IO Boilerplate ######
import sys
if len(sys.argv) < 2:
input_file_name = "15-setintersection-in.txt"
else:
input_file_name = sys.argv[1]
with open(input_file_name) as input_file:
input_lines = map(lambda x: x.strip(), filter(lambda x: x != '', input_file.readlines()))
###### /IO Boilerplate ######
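# For each input line: split the two semicolon-separated lists, build a set from each
# comma-separated list, and print their intersection in ascending order, one line per input line.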
def main():
for line in input_lines:
string_sets = line.split(';')
sets = [set(string_set.split(',')) for string_set in string_sets]
        # Sort numerically (key=int) rather than lexicographically, so e.g. "9" comes before "10"
        intersection = sorted(sets[0].intersection(sets[1]), key=int)
        print ",".join(intersection)
if __name__ == '__main__':
main()
| [] |
GentleWang1011/eggroll | python/arch/api/table/session.py | 417b029958e0e0ec6f0e1eb03d9ecdf4d5cff47c | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import datetime
import threading
from typing import Iterable
import six
from arch.api import WorkMode, Backend
from arch.api.table.table import Table
from eggroll.core.constants import StoreTypes
def build_session(job_id=None,
work_mode: WorkMode = WorkMode.STANDALONE,
backend: Backend = Backend.EGGROLL2,
persistent_engine: StoreTypes = StoreTypes.ROLLPAIR_LMDB):
from arch.api.table import eggroll_util
if backend.is_eggroll():
from arch.api.table.eggroll import session_impl
eggroll_session = eggroll_util.build_eggroll_session(work_mode=work_mode, job_id=job_id)
session = session_impl.FateSessionImpl(eggroll_session, work_mode, persistent_engine)
elif backend.is_spark():
from arch.api.table.pyspark import session_impl
eggroll_session = eggroll_util.build_eggroll_session(work_mode=work_mode, job_id=job_id)
session = session_impl.FateSessionImpl(eggroll_session, work_mode, persistent_engine)
elif backend.is_eggroll2():
from eggroll.core.session import session_init
from arch.api.table.eggroll2 import session_impl
options = {}
if work_mode == WorkMode.STANDALONE:
options['eggroll.session.deploy.mode'] = "standalone"
elif work_mode == WorkMode.CLUSTER:
options['eggroll.session.deploy.mode'] = "cluster"
er_session = session_init(session_id=job_id, options=options)
session = session_impl.FateSessionImpl(er_session, work_mode, persistent_engine)
else:
raise ValueError(f"work_mode: {work_mode} not supported")
return session
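# Illustrative usage sketch (not part of the original module; the job id below is a
# made-up value): build a session for the desired backend, use it, then stop it.
#   session = build_session(job_id="example_job_001",
#                           work_mode=WorkMode.STANDALONE,
#                           backend=Backend.EGGROLL2)
#   ...
#   session.stop()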
@six.add_metaclass(abc.ABCMeta)
class FateSession(object):
_instance: 'FateSession' = None
__lock = threading.Lock()
@staticmethod
def set_instance(instance):
if not FateSession._instance:
with FateSession.__lock:
if not FateSession._instance:
FateSession._instance = instance
@staticmethod
def get_instance():
return FateSession._instance
@abc.abstractmethod
def get_persistent_engine(self):
pass
@abc.abstractmethod
def table(self,
name,
namespace,
partition,
persistent,
in_place_computing,
create_if_missing,
error_if_exist) -> Table:
pass
@abc.abstractmethod
def parallelize(self,
data: Iterable,
include_key,
name,
partition,
namespace,
persistent,
chunk_size,
in_place_computing,
create_if_missing,
error_if_exist) -> Table:
pass
@abc.abstractmethod
def cleanup(self, name, namespace, persistent):
pass
# noinspection PyPep8Naming
@abc.abstractmethod
def generateUniqueId(self):
pass
@abc.abstractmethod
def get_session_id(self):
pass
@abc.abstractmethod
def stop(self):
pass
@staticmethod
def get_data_table(name, namespace):
"""
return data table instance by table name and table name space
:param name: table name of data table
:param namespace: table name space of data table
:return:
data table instance
"""
return FateSession.get_instance().table(name=name,
namespace=namespace,
create_if_missing=False,
persistent=True,
error_if_exist=False,
in_place_computing=False,
partition=1)
@staticmethod
def save_data_table_meta(kv, data_table_name, data_table_namespace):
"""
save data table meta information
:param kv: v should be serialized by JSON
:param data_table_name: table name of this data table
:param data_table_namespace: table name of this data table
:return:
"""
from arch.api.utils.core import json_dumps
data_meta_table = FateSession.get_instance().table(name="%s.meta" % data_table_name,
namespace=data_table_namespace,
partition=1,
create_if_missing=True,
error_if_exist=False,
persistent=True,
in_place_computing=False)
for k, v in kv.items():
data_meta_table.put(k, json_dumps(v))
@staticmethod
def get_data_table_meta(key, data_table_name, data_table_namespace):
"""
get data table meta information
:param key:
:param data_table_name: table name of this data table
:param data_table_namespace: table name of this data table
:return:
"""
from arch.api.utils.core import json_loads
data_meta_table = FateSession.get_instance().table(name="%s.meta" % data_table_name,
namespace=data_table_namespace,
create_if_missing=True,
error_if_exist=False,
in_place_computing=False,
persistent=True,
partition=1)
if data_meta_table:
value_bytes = data_meta_table.get(key, use_serialize=False)
if value_bytes:
return json_loads(value_bytes)
else:
return None
else:
return None
@staticmethod
def get_data_table_metas(data_table_name, data_table_namespace):
"""
get data table meta information
:param data_table_name: table name of this data table
:param data_table_namespace: table name of this data table
:return:
"""
from arch.api.utils.core import json_loads
data_meta_table = FateSession.get_instance().table(name="%s.meta" % data_table_name,
namespace=data_table_namespace,
partition=1,
persistent=True,
in_place_computing=False,
create_if_missing=True,
error_if_exist=False)
if data_meta_table:
metas = dict()
for k, v in data_meta_table.collect(use_serialize=False):
metas[k] = json_loads(v)
return metas
else:
return None
@staticmethod
def clean_table(namespace, regex_string='*'):
try:
FateSession.get_instance().cleanup(name=regex_string, namespace=namespace, persistent=False)
except Exception as e:
print(e)
@staticmethod
def save_data(kv_data: Iterable,
name,
namespace,
partition=1,
persistent: bool = True,
create_if_missing=True,
error_if_exist=False,
in_version: bool = False,
version_log=None):
"""
save data into data table
:param version_log:
:param in_version:
:param kv_data:
:param name: table name of data table
:param namespace: table namespace of data table
:param partition: number of partition
:param persistent: bool = True,
:param create_if_missing:
:param error_if_exist:
:return:
data table instance
"""
from arch.api.utils import version_control
data_table = FateSession.get_instance().table(name=name,
namespace=namespace,
partition=partition,
persistent=persistent,
in_place_computing=False,
create_if_missing=create_if_missing,
error_if_exist=error_if_exist)
data_table.put_all(kv_data)
if in_version:
version_log = "[AUTO] save data at %s." % datetime.datetime.now() if not version_log else version_log
version_control.save_version(name=name, namespace=namespace, version_log=version_log)
return data_table
| [((61, 1, 61, 31), 'six.add_metaclass', 'six.add_metaclass', ({(61, 19, 61, 30): 'abc.ABCMeta'}, {}), '(abc.ABCMeta)', False, 'import six\n'), ((64, 13, 64, 29), 'threading.Lock', 'threading.Lock', ({}, {}), '()', False, 'import threading\n'), ((37, 26, 37, 96), 'arch.api.table.eggroll_util.build_eggroll_session', 'eggroll_util.build_eggroll_session', (), '', False, 'from arch.api.table import eggroll_util\n'), ((38, 18, 38, 93), 'arch.api.table.eggroll2.session_impl.FateSessionImpl', 'session_impl.FateSessionImpl', ({(38, 47, 38, 62): 'eggroll_session', (38, 64, 38, 73): 'work_mode', (38, 75, 38, 92): 'persistent_engine'}, {}), '(eggroll_session, work_mode, persistent_engine)', False, 'from arch.api.table.eggroll2 import session_impl\n'), ((42, 26, 42, 96), 'arch.api.table.eggroll_util.build_eggroll_session', 'eggroll_util.build_eggroll_session', (), '', False, 'from arch.api.table import eggroll_util\n'), ((43, 18, 43, 93), 'arch.api.table.eggroll2.session_impl.FateSessionImpl', 'session_impl.FateSessionImpl', ({(43, 47, 43, 62): 'eggroll_session', (43, 64, 43, 73): 'work_mode', (43, 75, 43, 92): 'persistent_engine'}, {}), '(eggroll_session, work_mode, persistent_engine)', False, 'from arch.api.table.eggroll2 import session_impl\n'), ((252, 12, 252, 97), 'arch.api.utils.version_control.save_version', 'version_control.save_version', (), '', False, 'from arch.api.utils import version_control\n'), ((53, 21, 53, 69), 'eggroll.core.session.session_init', 'session_init', (), '', False, 'from eggroll.core.session import session_init\n'), ((54, 18, 54, 88), 'arch.api.table.eggroll2.session_impl.FateSessionImpl', 'session_impl.FateSessionImpl', ({(54, 47, 54, 57): 'er_session', (54, 59, 54, 68): 'work_mode', (54, 70, 54, 87): 'persistent_engine'}, {}), '(er_session, work_mode, persistent_engine)', False, 'from arch.api.table.eggroll2 import session_impl\n'), ((158, 35, 158, 48), 'arch.api.utils.core.json_dumps', 'json_dumps', ({(158, 46, 158, 47): 'v'}, {}), '(v)', False, 'from arch.api.utils.core import json_dumps\n'), ((180, 23, 180, 46), 'arch.api.utils.core.json_loads', 'json_loads', ({(180, 34, 180, 45): 'value_bytes'}, {}), '(value_bytes)', False, 'from arch.api.utils.core import json_loads\n'), ((205, 27, 205, 40), 'arch.api.utils.core.json_loads', 'json_loads', ({(205, 38, 205, 39): 'v'}, {}), '(v)', False, 'from arch.api.utils.core import json_loads\n'), ((251, 54, 251, 77), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n')] |
Asap7772/rail-rl-franka-eval | experiments/vitchyr/vaes/learn_swirl_vae.py | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | """
VAE on the swirl task.
Basically, VAEs don't work. It's probably because the prior isn't very good
and/or because the learning signal is pretty weak when both the encoder and
decoder change quickly. However, I also tried alternating between the two,
and that didn't seem to help.
"""
from torch.distributions import Normal
from torch.optim import Adam
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch import nn as nn
import railrl.torch.pytorch_util as ptu
SWIRL_RATE = 1
T = 10
BS = 128
N_BATCHES = 2000
N_VIS = 1000
HIDDEN_SIZE = 32
VERBOSE = False
def swirl_data(batch_size):
t = np.random.uniform(size=batch_size, low=0, high=T)
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
data = np.array([x, y]).T
noise = np.random.randn(batch_size, 2) / (T * 2)
return data + noise, t.reshape(-1, 1)
def swirl_t_to_data(t):
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
return np.array([x, y]).T
def kl_to_prior(means, log_stds, stds):
"""
KL between a Gaussian and a standard Gaussian.
https://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
"""
return 0.5 * (
- 2 * log_stds # log std_prior = 0
- 1 # d = 1
+ stds ** 2
+ means ** 2
)
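# For reference: for a diagonal-Gaussian posterior N(mu, sigma^2) and a standard-normal
# prior, the per-dimension KL has the closed form 0.5 * (mu^2 + sigma^2 - 2*log(sigma) - 1),
# which is exactly the expression above; it vanishes when mu = 0 and sigma = 1.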
class Encoder(nn.Sequential):
def encode(self, x):
return self.get_encoding_and_suff_stats(x)[0]
def get_encoding_and_suff_stats(self, x):
output = self(x)
means, log_stds = (
output[:, 0:1], output[:, 1:2]
)
stds = log_stds.exp()
epsilon = ptu.Variable(torch.randn(*means.size()))
latents = epsilon * stds + means
latents = latents
return latents, means, log_stds, stds
class Decoder(nn.Sequential):
def decode(self, latents):
output = self(latents)
means, log_stds = output[:, 0:2], output[:, 2:4]
distribution = Normal(means, log_stds.exp())
return distribution.sample()
def t_to_xy(t):
if len(t.shape) == 2:
t = t[:, 0]
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
return np.array([x, y]).T
def pretrain_encoder(encoder, opt):
losses = []
for _ in range(1000):
x_np, y_np = swirl_data(BS)
x = ptu.np_to_var(x_np)
y = ptu.np_to_var(y_np)
y_hat = encoder.encode(x)
loss = ((y_hat - y) ** 2).mean()
opt.zero_grad()
loss.backward()
opt.step()
losses.append(loss.data.numpy())
if VERBOSE:
x_np, y_np = swirl_data(N_VIS)
x = ptu.np_to_var(x_np)
y_hat = encoder.encode(x)
y_hat_np = y_hat.data.numpy()
x_hat_np = t_to_xy(y_hat_np[:, 0])
plt.subplot(2, 1, 1)
plt.plot(np.array(losses))
plt.title("Training Loss")
plt.subplot(2, 1, 2)
plt.plot(x_np[:, 0], x_np[:, 1], '.')
plt.plot(x_hat_np[:, 0], x_hat_np[:, 1], '.')
plt.title("Samples")
plt.legend(["Samples", "Estimates"])
plt.show()
def train_encoder(encoder, decoder, encoder_opt):
batch, true_latents = swirl_data(BS)
batch = ptu.np_to_var(batch)
latents, means, log_stds, stds = encoder.get_encoding_and_suff_stats(
batch
)
kl = kl_to_prior(means, log_stds, stds)
latents = encoder.encode(batch)
decoder_output = decoder(latents)
decoder_means = decoder_output[:, 0:2]
decoder_log_stds = decoder_output[:, 2:4]
distribution = Normal(decoder_means, decoder_log_stds.exp())
reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
# elbo = - kl + reconstruction_log_prob
# loss = - elbo.mean()
loss = - reconstruction_log_prob.mean()
# This is the second place where we cheat:
latent_loss = ((ptu.np_to_var(true_latents) - latents) ** 2).mean()
loss = loss# + latent_loss
encoder_opt.zero_grad()
loss.backward()
encoder_opt.step()
return loss
def train_decoder(encoder, decoder, decoder_opt):
batch, true_latents = swirl_data(BS)
batch = ptu.np_to_var(batch)
latents = encoder.encode(batch)
decoder_output = decoder(latents)
decoder_means = decoder_output[:, 0:2]
decoder_log_stds = decoder_output[:, 2:4]
distribution = Normal(decoder_means, decoder_log_stds.exp())
reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
loss = - reconstruction_log_prob.mean()
decoder_opt.zero_grad()
loss.backward()
decoder_opt.step()
return loss
def train_alternating(*_):
encoder = Encoder(
nn.Linear(2, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 2),
)
encoder_opt = Adam(encoder.parameters())
decoder = Decoder(
nn.Linear(1, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 4),
)
decoder_opt = Adam(decoder.parameters())
encoder_losses = []
decoder_losses = []
for _ in range(100):
for _ in range(N_BATCHES):
encoder_losses.append(
train_encoder(encoder, decoder, encoder_opt).data.numpy()
)
for _ in range(N_BATCHES):
decoder_losses.append(
train_decoder(encoder, decoder, decoder_opt).data.numpy()
)
# Visualize
vis_samples_np, true_latents_np = swirl_data(N_VIS)
vis_samples = ptu.np_to_var(vis_samples_np)
true_xy_mean_np = t_to_xy(true_latents_np)
latents = encoder.encode(vis_samples)
reconstructed_samples = decoder.decode(latents).data.numpy()
generated_samples = decoder.decode(
ptu.Variable(torch.randn(*latents.shape))
).data.numpy()
plt.subplot(2, 2, 1)
plt.plot(np.array(encoder_losses))
plt.title("Encoder Loss")
plt.subplot(2, 2, 2)
plt.plot(np.array(decoder_losses))
plt.title("Decoder Loss")
plt.subplot(2, 3, 4)
plt.plot(generated_samples[:, 0], generated_samples[:, 1], '.')
plt.title("Generated Samples")
plt.subplot(2, 3, 5)
plt.plot(reconstructed_samples[:, 0], reconstructed_samples[:, 1], '.')
estimated_means = t_to_xy(latents.data.numpy())
# plt.plot(estimated_means[:, 0], estimated_means[:, 1], '.')
plt.title("Reconstruction")
# plt.legend(["Samples", "Projected Latents"])
plt.subplot(2, 3, 6)
plt.plot(vis_samples_np[:, 0], vis_samples_np[:, 1], '.')
plt.plot(true_xy_mean_np[:, 0], true_xy_mean_np[:, 1], '.')
plt.title("Original Samples")
plt.legend(["Original", "True means"])
plt.show()
def train():
encoder = Encoder(
nn.Linear(2, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 2),
)
encoder_opt = Adam(encoder.parameters())
# This is the first place that we cheat. However, this pretraining isn't
# needed if you just add the loss to the training (see below)
# pretrain_encoder(encoder, encoder_opt)
decoder = Decoder(
nn.Linear(1, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.ReLU(),
nn.Linear(HIDDEN_SIZE, 4),
)
decoder_opt = Adam(decoder.parameters())
print("Done training encoder")
losses = []
kls = []
log_probs = []
for _ in range(N_BATCHES):
batch, true_latents = swirl_data(BS)
batch = ptu.np_to_var(batch)
latents, means, log_stds, stds = encoder.get_encoding_and_suff_stats(
batch
)
kl = kl_to_prior(means, log_stds, stds)
latents = encoder.encode(batch)
# decoder_output = decoder(latents.detach())
decoder_output = decoder(latents)
decoder_means = decoder_output[:, 0:2]
decoder_log_stds = decoder_output[:, 2:4]
distribution = Normal(decoder_means, decoder_log_stds.exp())
reconstruction_log_prob = distribution.log_prob(batch).sum(dim=1)
elbo = - kl + reconstruction_log_prob
loss = - elbo.mean()
# This is the second place where we cheat:
latent_loss = ((ptu.np_to_var(true_latents) - latents) ** 2).mean()
loss = loss + latent_loss
decoder_opt.zero_grad()
encoder_opt.zero_grad()
loss.backward()
decoder_opt.step()
encoder_opt.step()
losses.append(loss.data.numpy())
kls.append(kl.mean().data.numpy())
log_probs.append(reconstruction_log_prob.mean().data.numpy())
# Visualize
vis_samples_np, true_latents_np = swirl_data(N_VIS)
vis_samples = ptu.np_to_var(vis_samples_np)
true_xy_mean_np = t_to_xy(true_latents_np)
latents = encoder.encode(vis_samples)
reconstructed_samples = decoder.decode(latents).data.numpy()
generated_samples = decoder.decode(
ptu.Variable(torch.randn(*latents.shape))
).data.numpy()
plt.subplot(2, 3, 1)
plt.plot(np.array(losses))
plt.title("Training Loss")
plt.subplot(2, 3, 2)
plt.plot(np.array(kls))
plt.title("KLs")
plt.subplot(2, 3, 3)
plt.plot(np.array(log_probs))
plt.title("Log Probs")
plt.subplot(2, 3, 4)
plt.plot(generated_samples[:, 0], generated_samples[:, 1], '.')
plt.title("Generated Samples")
plt.subplot(2, 3, 5)
plt.plot(reconstructed_samples[:, 0], reconstructed_samples[:, 1], '.')
estimated_means = t_to_xy(latents.data.numpy())
plt.plot(estimated_means[:, 0], estimated_means[:, 1], '.')
plt.title("Reconstruction")
plt.subplot(2, 3, 6)
plt.plot(vis_samples_np[:, 0], vis_samples_np[:, 1], '.')
plt.plot(true_xy_mean_np[:, 0], true_xy_mean_np[:, 1], '.')
plt.title("Original Samples")
plt.legend(["Original", "True means"])
plt.show()
if __name__ == '__main__':
train_alternating()
# train()
| [((27, 8, 27, 57), 'numpy.random.uniform', 'np.random.uniform', (), '', True, 'import numpy as np\n'), ((122, 12, 122, 32), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', ({(122, 26, 122, 31): 'batch'}, {}), '(batch)', True, 'import railrl.torch.pytorch_util as ptu\n'), ((150, 12, 150, 32), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', ({(150, 26, 150, 31): 'batch'}, {}), '(batch)', True, 'import railrl.torch.pytorch_util as ptu\n'), ((206, 18, 206, 47), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', ({(206, 32, 206, 46): 'vis_samples_np'}, {}), '(vis_samples_np)', True, 'import railrl.torch.pytorch_util as ptu\n'), ((214, 4, 214, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(214, 16, 214, 17): '(2)', (214, 19, 214, 20): '(2)', (214, 22, 214, 23): '(1)'}, {}), '(2, 2, 1)', True, 'import matplotlib.pyplot as plt\n'), ((216, 4, 216, 29), 'matplotlib.pyplot.title', 'plt.title', ({(216, 14, 216, 28): '"""Encoder Loss"""'}, {}), "('Encoder Loss')", True, 'import matplotlib.pyplot as plt\n'), ((217, 4, 217, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(217, 16, 217, 17): '(2)', (217, 19, 217, 20): '(2)', (217, 22, 217, 23): '(2)'}, {}), '(2, 2, 2)', True, 'import matplotlib.pyplot as plt\n'), ((219, 4, 219, 29), 'matplotlib.pyplot.title', 'plt.title', ({(219, 14, 219, 28): '"""Decoder Loss"""'}, {}), "('Decoder Loss')", True, 'import matplotlib.pyplot as plt\n'), ((221, 4, 221, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(221, 16, 221, 17): '(2)', (221, 19, 221, 20): '(3)', (221, 22, 221, 23): '(4)'}, {}), '(2, 3, 4)', True, 'import matplotlib.pyplot as plt\n'), ((222, 4, 222, 67), 'matplotlib.pyplot.plot', 'plt.plot', ({(222, 13, 222, 36): 'generated_samples[:, (0)]', (222, 38, 222, 61): 'generated_samples[:, (1)]', (222, 63, 222, 66): '"""."""'}, {}), "(generated_samples[:, (0)], generated_samples[:, (1)], '.')", True, 'import matplotlib.pyplot as plt\n'), ((223, 4, 223, 34), 'matplotlib.pyplot.title', 'plt.title', ({(223, 14, 223, 33): '"""Generated Samples"""'}, {}), "('Generated Samples')", True, 'import matplotlib.pyplot as plt\n'), ((224, 4, 224, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(224, 16, 224, 17): '(2)', (224, 19, 224, 20): '(3)', (224, 22, 224, 23): '(5)'}, {}), '(2, 3, 5)', True, 'import matplotlib.pyplot as plt\n'), ((225, 4, 225, 75), 'matplotlib.pyplot.plot', 'plt.plot', ({(225, 13, 225, 40): 'reconstructed_samples[:, (0)]', (225, 42, 225, 69): 'reconstructed_samples[:, (1)]', (225, 71, 225, 74): '"""."""'}, {}), "(reconstructed_samples[:, (0)], reconstructed_samples[:, (1)], '.')", True, 'import matplotlib.pyplot as plt\n'), ((228, 4, 228, 31), 'matplotlib.pyplot.title', 'plt.title', ({(228, 14, 228, 30): '"""Reconstruction"""'}, {}), "('Reconstruction')", True, 'import matplotlib.pyplot as plt\n'), ((230, 4, 230, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(230, 16, 230, 17): '(2)', (230, 19, 230, 20): '(3)', (230, 22, 230, 23): '(6)'}, {}), '(2, 3, 6)', True, 'import matplotlib.pyplot as plt\n'), ((231, 4, 231, 61), 'matplotlib.pyplot.plot', 'plt.plot', ({(231, 13, 231, 33): 'vis_samples_np[:, (0)]', (231, 35, 231, 55): 'vis_samples_np[:, (1)]', (231, 57, 231, 60): '"""."""'}, {}), "(vis_samples_np[:, (0)], vis_samples_np[:, (1)], '.')", True, 'import matplotlib.pyplot as plt\n'), ((232, 4, 232, 63), 'matplotlib.pyplot.plot', 'plt.plot', ({(232, 13, 232, 34): 'true_xy_mean_np[:, (0)]', (232, 36, 232, 57): 'true_xy_mean_np[:, (1)]', (232, 59, 232, 62): '"""."""'}, {}), "(true_xy_mean_np[:, (0)], 
true_xy_mean_np[:, (1)], '.')", True, 'import matplotlib.pyplot as plt\n'), ((233, 4, 233, 33), 'matplotlib.pyplot.title', 'plt.title', ({(233, 14, 233, 32): '"""Original Samples"""'}, {}), "('Original Samples')", True, 'import matplotlib.pyplot as plt\n'), ((234, 4, 234, 42), 'matplotlib.pyplot.legend', 'plt.legend', ({(234, 15, 234, 41): "['Original', 'True means']"}, {}), "(['Original', 'True means'])", True, 'import matplotlib.pyplot as plt\n'), ((235, 4, 235, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((306, 18, 306, 47), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', ({(306, 32, 306, 46): 'vis_samples_np'}, {}), '(vis_samples_np)', True, 'import railrl.torch.pytorch_util as ptu\n'), ((314, 4, 314, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(314, 16, 314, 17): '(2)', (314, 19, 314, 20): '(3)', (314, 22, 314, 23): '(1)'}, {}), '(2, 3, 1)', True, 'import matplotlib.pyplot as plt\n'), ((316, 4, 316, 30), 'matplotlib.pyplot.title', 'plt.title', ({(316, 14, 316, 29): '"""Training Loss"""'}, {}), "('Training Loss')", True, 'import matplotlib.pyplot as plt\n'), ((317, 4, 317, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(317, 16, 317, 17): '(2)', (317, 19, 317, 20): '(3)', (317, 22, 317, 23): '(2)'}, {}), '(2, 3, 2)', True, 'import matplotlib.pyplot as plt\n'), ((319, 4, 319, 20), 'matplotlib.pyplot.title', 'plt.title', ({(319, 14, 319, 19): '"""KLs"""'}, {}), "('KLs')", True, 'import matplotlib.pyplot as plt\n'), ((320, 4, 320, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(320, 16, 320, 17): '(2)', (320, 19, 320, 20): '(3)', (320, 22, 320, 23): '(3)'}, {}), '(2, 3, 3)', True, 'import matplotlib.pyplot as plt\n'), ((322, 4, 322, 26), 'matplotlib.pyplot.title', 'plt.title', ({(322, 14, 322, 25): '"""Log Probs"""'}, {}), "('Log Probs')", True, 'import matplotlib.pyplot as plt\n'), ((324, 4, 324, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(324, 16, 324, 17): '(2)', (324, 19, 324, 20): '(3)', (324, 22, 324, 23): '(4)'}, {}), '(2, 3, 4)', True, 'import matplotlib.pyplot as plt\n'), ((325, 4, 325, 67), 'matplotlib.pyplot.plot', 'plt.plot', ({(325, 13, 325, 36): 'generated_samples[:, (0)]', (325, 38, 325, 61): 'generated_samples[:, (1)]', (325, 63, 325, 66): '"""."""'}, {}), "(generated_samples[:, (0)], generated_samples[:, (1)], '.')", True, 'import matplotlib.pyplot as plt\n'), ((326, 4, 326, 34), 'matplotlib.pyplot.title', 'plt.title', ({(326, 14, 326, 33): '"""Generated Samples"""'}, {}), "('Generated Samples')", True, 'import matplotlib.pyplot as plt\n'), ((327, 4, 327, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(327, 16, 327, 17): '(2)', (327, 19, 327, 20): '(3)', (327, 22, 327, 23): '(5)'}, {}), '(2, 3, 5)', True, 'import matplotlib.pyplot as plt\n'), ((328, 4, 328, 75), 'matplotlib.pyplot.plot', 'plt.plot', ({(328, 13, 328, 40): 'reconstructed_samples[:, (0)]', (328, 42, 328, 69): 'reconstructed_samples[:, (1)]', (328, 71, 328, 74): '"""."""'}, {}), "(reconstructed_samples[:, (0)], reconstructed_samples[:, (1)], '.')", True, 'import matplotlib.pyplot as plt\n'), ((330, 4, 330, 63), 'matplotlib.pyplot.plot', 'plt.plot', ({(330, 13, 330, 34): 'estimated_means[:, (0)]', (330, 36, 330, 57): 'estimated_means[:, (1)]', (330, 59, 330, 62): '"""."""'}, {}), "(estimated_means[:, (0)], estimated_means[:, (1)], '.')", True, 'import matplotlib.pyplot as plt\n'), ((331, 4, 331, 31), 'matplotlib.pyplot.title', 'plt.title', ({(331, 14, 331, 30): '"""Reconstruction"""'}, {}), "('Reconstruction')", True, 
'import matplotlib.pyplot as plt\n'), ((332, 4, 332, 24), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(332, 16, 332, 17): '(2)', (332, 19, 332, 20): '(3)', (332, 22, 332, 23): '(6)'}, {}), '(2, 3, 6)', True, 'import matplotlib.pyplot as plt\n'), ((333, 4, 333, 61), 'matplotlib.pyplot.plot', 'plt.plot', ({(333, 13, 333, 33): 'vis_samples_np[:, (0)]', (333, 35, 333, 55): 'vis_samples_np[:, (1)]', (333, 57, 333, 60): '"""."""'}, {}), "(vis_samples_np[:, (0)], vis_samples_np[:, (1)], '.')", True, 'import matplotlib.pyplot as plt\n'), ((334, 4, 334, 63), 'matplotlib.pyplot.plot', 'plt.plot', ({(334, 13, 334, 34): 'true_xy_mean_np[:, (0)]', (334, 36, 334, 57): 'true_xy_mean_np[:, (1)]', (334, 59, 334, 62): '"""."""'}, {}), "(true_xy_mean_np[:, (0)], true_xy_mean_np[:, (1)], '.')", True, 'import matplotlib.pyplot as plt\n'), ((335, 4, 335, 33), 'matplotlib.pyplot.title', 'plt.title', ({(335, 14, 335, 32): '"""Original Samples"""'}, {}), "('Original Samples')", True, 'import matplotlib.pyplot as plt\n'), ((336, 4, 336, 42), 'matplotlib.pyplot.legend', 'plt.legend', ({(336, 15, 336, 41): "['Original', 'True means']"}, {}), "(['Original', 'True means'])", True, 'import matplotlib.pyplot as plt\n'), ((337, 4, 337, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((30, 11, 30, 27), 'numpy.array', 'np.array', ({(30, 20, 30, 26): '[x, y]'}, {}), '([x, y])', True, 'import numpy as np\n'), ((31, 12, 31, 42), 'numpy.random.randn', 'np.random.randn', ({(31, 28, 31, 38): 'batch_size', (31, 40, 31, 41): '(2)'}, {}), '(batch_size, 2)', True, 'import numpy as np\n'), ((38, 11, 38, 27), 'numpy.array', 'np.array', ({(38, 20, 38, 26): '[x, y]'}, {}), '([x, y])', True, 'import numpy as np\n'), ((84, 11, 84, 27), 'numpy.array', 'np.array', ({(84, 20, 84, 26): '[x, y]'}, {}), '([x, y])', True, 'import numpy as np\n'), ((91, 12, 91, 31), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', ({(91, 26, 91, 30): 'x_np'}, {}), '(x_np)', True, 'import railrl.torch.pytorch_util as ptu\n'), ((92, 12, 92, 31), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', ({(92, 26, 92, 30): 'y_np'}, {}), '(y_np)', True, 'import railrl.torch.pytorch_util as ptu\n'), ((103, 12, 103, 31), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', ({(103, 26, 103, 30): 'x_np'}, {}), '(x_np)', True, 'import railrl.torch.pytorch_util as ptu\n'), ((108, 8, 108, 28), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(108, 20, 108, 21): '(2)', (108, 23, 108, 24): '(1)', (108, 26, 108, 27): '(1)'}, {}), '(2, 1, 1)', True, 'import matplotlib.pyplot as plt\n'), ((110, 8, 110, 34), 'matplotlib.pyplot.title', 'plt.title', ({(110, 18, 110, 33): '"""Training Loss"""'}, {}), "('Training Loss')", True, 'import matplotlib.pyplot as plt\n'), ((112, 8, 112, 28), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(112, 20, 112, 21): '(2)', (112, 23, 112, 24): '(1)', (112, 26, 112, 27): '(2)'}, {}), '(2, 1, 2)', True, 'import matplotlib.pyplot as plt\n'), ((113, 8, 113, 45), 'matplotlib.pyplot.plot', 'plt.plot', ({(113, 17, 113, 27): 'x_np[:, (0)]', (113, 29, 113, 39): 'x_np[:, (1)]', (113, 41, 113, 44): '"""."""'}, {}), "(x_np[:, (0)], x_np[:, (1)], '.')", True, 'import matplotlib.pyplot as plt\n'), ((114, 8, 114, 53), 'matplotlib.pyplot.plot', 'plt.plot', ({(114, 17, 114, 31): 'x_hat_np[:, (0)]', (114, 33, 114, 47): 'x_hat_np[:, (1)]', (114, 49, 114, 52): '"""."""'}, {}), "(x_hat_np[:, (0)], x_hat_np[:, (1)], '.')", True, 'import matplotlib.pyplot as plt\n'), ((115, 8, 115, 28), 'matplotlib.pyplot.title', 
'plt.title', ({(115, 18, 115, 27): '"""Samples"""'}, {}), "('Samples')", True, 'import matplotlib.pyplot as plt\n'), ((116, 8, 116, 44), 'matplotlib.pyplot.legend', 'plt.legend', ({(116, 19, 116, 43): "['Samples', 'Estimates']"}, {}), "(['Samples', 'Estimates'])", True, 'import matplotlib.pyplot as plt\n'), ((117, 8, 117, 18), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((168, 8, 168, 33), 'torch.nn.Linear', 'nn.Linear', ({(168, 18, 168, 19): '2', (168, 21, 168, 32): 'HIDDEN_SIZE'}, {}), '(2, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((169, 8, 169, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((170, 8, 170, 43), 'torch.nn.Linear', 'nn.Linear', ({(170, 18, 170, 29): 'HIDDEN_SIZE', (170, 31, 170, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((171, 8, 171, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((172, 8, 172, 43), 'torch.nn.Linear', 'nn.Linear', ({(172, 18, 172, 29): 'HIDDEN_SIZE', (172, 31, 172, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((173, 8, 173, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((174, 8, 174, 43), 'torch.nn.Linear', 'nn.Linear', ({(174, 18, 174, 29): 'HIDDEN_SIZE', (174, 31, 174, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((175, 8, 175, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((176, 8, 176, 33), 'torch.nn.Linear', 'nn.Linear', ({(176, 18, 176, 29): 'HIDDEN_SIZE', (176, 31, 176, 32): '2'}, {}), '(HIDDEN_SIZE, 2)', True, 'from torch import nn as nn\n'), ((180, 8, 180, 33), 'torch.nn.Linear', 'nn.Linear', ({(180, 18, 180, 19): '1', (180, 21, 180, 32): 'HIDDEN_SIZE'}, {}), '(1, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((181, 8, 181, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((182, 8, 182, 43), 'torch.nn.Linear', 'nn.Linear', ({(182, 18, 182, 29): 'HIDDEN_SIZE', (182, 31, 182, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((183, 8, 183, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((184, 8, 184, 43), 'torch.nn.Linear', 'nn.Linear', ({(184, 18, 184, 29): 'HIDDEN_SIZE', (184, 31, 184, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((185, 8, 185, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((186, 8, 186, 43), 'torch.nn.Linear', 'nn.Linear', ({(186, 18, 186, 29): 'HIDDEN_SIZE', (186, 31, 186, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((187, 8, 187, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((188, 8, 188, 33), 'torch.nn.Linear', 'nn.Linear', ({(188, 18, 188, 29): 'HIDDEN_SIZE', (188, 31, 188, 32): '4'}, {}), '(HIDDEN_SIZE, 4)', True, 'from torch import nn as nn\n'), ((215, 13, 215, 37), 'numpy.array', 'np.array', ({(215, 22, 215, 36): 'encoder_losses'}, {}), '(encoder_losses)', True, 'import numpy as np\n'), ((218, 13, 218, 37), 'numpy.array', 'np.array', ({(218, 22, 218, 36): 'decoder_losses'}, {}), '(decoder_losses)', True, 'import numpy as np\n'), ((240, 8, 240, 33), 'torch.nn.Linear', 'nn.Linear', ({(240, 18, 240, 19): '2', (240, 21, 240, 32): 
'HIDDEN_SIZE'}, {}), '(2, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((241, 8, 241, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((242, 8, 242, 43), 'torch.nn.Linear', 'nn.Linear', ({(242, 18, 242, 29): 'HIDDEN_SIZE', (242, 31, 242, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((243, 8, 243, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((244, 8, 244, 43), 'torch.nn.Linear', 'nn.Linear', ({(244, 18, 244, 29): 'HIDDEN_SIZE', (244, 31, 244, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((245, 8, 245, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((246, 8, 246, 43), 'torch.nn.Linear', 'nn.Linear', ({(246, 18, 246, 29): 'HIDDEN_SIZE', (246, 31, 246, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((247, 8, 247, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((248, 8, 248, 33), 'torch.nn.Linear', 'nn.Linear', ({(248, 18, 248, 29): 'HIDDEN_SIZE', (248, 31, 248, 32): '2'}, {}), '(HIDDEN_SIZE, 2)', True, 'from torch import nn as nn\n'), ((255, 8, 255, 33), 'torch.nn.Linear', 'nn.Linear', ({(255, 18, 255, 19): '1', (255, 21, 255, 32): 'HIDDEN_SIZE'}, {}), '(1, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((256, 8, 256, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((257, 8, 257, 43), 'torch.nn.Linear', 'nn.Linear', ({(257, 18, 257, 29): 'HIDDEN_SIZE', (257, 31, 257, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((258, 8, 258, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((259, 8, 259, 43), 'torch.nn.Linear', 'nn.Linear', ({(259, 18, 259, 29): 'HIDDEN_SIZE', (259, 31, 259, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((260, 8, 260, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((261, 8, 261, 43), 'torch.nn.Linear', 'nn.Linear', ({(261, 18, 261, 29): 'HIDDEN_SIZE', (261, 31, 261, 42): 'HIDDEN_SIZE'}, {}), '(HIDDEN_SIZE, HIDDEN_SIZE)', True, 'from torch import nn as nn\n'), ((262, 8, 262, 17), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'from torch import nn as nn\n'), ((263, 8, 263, 33), 'torch.nn.Linear', 'nn.Linear', ({(263, 18, 263, 29): 'HIDDEN_SIZE', (263, 31, 263, 32): '4'}, {}), '(HIDDEN_SIZE, 4)', True, 'from torch import nn as nn\n'), ((273, 16, 273, 36), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', ({(273, 30, 273, 35): 'batch'}, {}), '(batch)', True, 'import railrl.torch.pytorch_util as ptu\n'), ((315, 13, 315, 29), 'numpy.array', 'np.array', ({(315, 22, 315, 28): 'losses'}, {}), '(losses)', True, 'import numpy as np\n'), ((318, 13, 318, 26), 'numpy.array', 'np.array', ({(318, 22, 318, 25): 'kls'}, {}), '(kls)', True, 'import numpy as np\n'), ((321, 13, 321, 32), 'numpy.array', 'np.array', ({(321, 22, 321, 31): 'log_probs'}, {}), '(log_probs)', True, 'import numpy as np\n'), ((28, 12, 28, 34), 'numpy.cos', 'np.cos', ({(28, 19, 28, 33): '(t * SWIRL_RATE)'}, {}), '(t * SWIRL_RATE)', True, 'import numpy as np\n'), ((29, 12, 29, 34), 'numpy.sin', 'np.sin', ({(29, 19, 29, 33): '(t * SWIRL_RATE)'}, {}), '(t * SWIRL_RATE)', True, 'import numpy as np\n'), ((36, 12, 36, 34), 'numpy.cos', 'np.cos', ({(36, 19, 36, 33): '(t * 
SWIRL_RATE)'}, {}), '(t * SWIRL_RATE)', True, 'import numpy as np\n'), ((37, 12, 37, 34), 'numpy.sin', 'np.sin', ({(37, 19, 37, 33): '(t * SWIRL_RATE)'}, {}), '(t * SWIRL_RATE)', True, 'import numpy as np\n'), ((82, 12, 82, 34), 'numpy.cos', 'np.cos', ({(82, 19, 82, 33): '(t * SWIRL_RATE)'}, {}), '(t * SWIRL_RATE)', True, 'import numpy as np\n'), ((83, 12, 83, 34), 'numpy.sin', 'np.sin', ({(83, 19, 83, 33): '(t * SWIRL_RATE)'}, {}), '(t * SWIRL_RATE)', True, 'import numpy as np\n'), ((109, 17, 109, 33), 'numpy.array', 'np.array', ({(109, 26, 109, 32): 'losses'}, {}), '(losses)', True, 'import numpy as np\n'), ((140, 20, 140, 47), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', ({(140, 34, 140, 46): 'true_latents'}, {}), '(true_latents)', True, 'import railrl.torch.pytorch_util as ptu\n'), ((211, 21, 211, 48), 'torch.randn', 'torch.randn', ({(211, 33, 211, 47): '*latents.shape'}, {}), '(*latents.shape)', False, 'import torch\n'), ((292, 24, 292, 51), 'railrl.torch.pytorch_util.np_to_var', 'ptu.np_to_var', ({(292, 38, 292, 50): 'true_latents'}, {}), '(true_latents)', True, 'import railrl.torch.pytorch_util as ptu\n'), ((311, 21, 311, 48), 'torch.randn', 'torch.randn', ({(311, 33, 311, 47): '*latents.shape'}, {}), '(*latents.shape)', False, 'import torch\n')] |
JosephBushagour/litex | litex/build/openfpgaloader.py | 2b49430f2c53c4a8caa66b678af4660127b546e4 | #
# This file is part of LiteX.
#
# Copyright (c) 2020 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.tools import write_to_file
from litex.build.generic_programmer import GenericProgrammer
# openFPGAloader ------------------------------------------------------------------------------------------
class OpenFPGALoader(GenericProgrammer):
needs_bitreverse = False
def __init__(self, board):
self.board = board
def load_bitstream(self, bitstream_file):
cmd = ["openFPGALoader", "--board", self.board, "--bitstream", bitstream_file]
self.call(cmd)
def flash(self, address, data_file):
cmd = ["openFPGALoader", "--board", self.board, "--write-flash", "--bitstream", data_file]
if address:
cmd.append("--offset")
cmd.append(address)
self.call(cmd)
| [] |
NutriBuddi/NutriBuddi | NutriBuddiAPIServices/ImageClassifier/NutriBuddiClassifier/Classifier/FoodClassifier.py | b4343216cbc99b17a1faf4df50b681465418291f | class FoodClassifier:
#Class Attributes:
#model - the underlying keras model
#labels - the labels to be associated with the activation of each output neuron.
#Labels must be the same size as the output layer of the neural network.
def __init__(self, modelpath, labels, min_confidence = 0.6):
from keras.models import load_model
from keras.applications.resnet50 import ResNet50
self.resnet = ResNet50(include_top=False,weights='imagenet',pooling='max',input_shape=(224,224,3))
self.extModel = load_model(modelpath)
if(isinstance(labels,str)):
#its a file path
from os.path import exists
if(exists(labels)):
f = open(labels,'r')
x = f.readlines()
y = []
for i in x:
y.append(i.split('\n')[0])
self.labels = y
else:
self.labels = labels
self.num_classes = len(labels)
self.min_confidence=min_confidence
def predict(self,img):
import os
from PIL import Image
from keras.preprocessing.image import img_to_array
import numpy as np
#check if image is a filepath
if(isinstance(img,str)):
if(not os.path.exists(img)):
print("Error: Invalid File Path")
return ""
else:
#if its a filepath, convert to PIL image
img = Image.open(img)
#resize image
#shape from model input
shape = self.resnet.input_shape
imgr = img.resize(shape[1:3])
x = img_to_array(imgr).reshape((1,shape[1],shape[2],shape[3]))
#predict
features = self.resnet.predict(x)
prediction = self.extModel.predict(features)
#get max of predictions and return label(s)
predIdx = np.argmax(prediction)
if(prediction[0,predIdx]<self.min_confidence):
return ""
else:
return self.labels[predIdx]
def set_extModel(self,model):
self.extModel = model
def get_extModel(self):
return self.extModel
def set_labels(self,labels):
self.labels = labels
def get_labels(self):
return self.labels
def set_min_confidence(self,conf):
self.min_confidence=conf
def get_min_confidence(self):
return self.min_confidence
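# Illustrative usage (not part of the original file; the file names are placeholders):
#   clf = FoodClassifier("extModel.h5", "labels.txt", min_confidence=0.6)
#   label = clf.predict("some_food_photo.jpg")   # returns "" if below min_confidence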
def generate_features_from_directory(location,target_image_count,model=None):
#generates feature maps from the convolutional layers of ResNet50 using all
#images from the directory
#INPUT:
#directory containing NESTED DIRECTORIES of images. (Very Important)
#the number of feature maps to generate for each image class
#OUTPUT:
#a npy file containing the 2048-dimensional feature vector
#produced by ResNet50's convolutional layers
#data is generated in batches of 32
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import ResNet50
from os import listdir
from os.path import isdir
#create the model, if not defined
if model==None:
model = ResNet50(weights='imagenet',include_top=False,pooling='max')
#create the data generation
datagen = ImageDataGenerator()
#for each directory in
if(not isdir(location)):
print("could not find location: " + location)
return
for label in listdir(location):
#first check that its a directory
label_path = location+'/'+label
if(not isdir(label_path)):
continue
#create the data generator
#Output size is 256x256 to fit the ResNet50
print("Generating feature maps for " + label + "...")
generator = datagen.flow_from_directory(
label_path,
target_size = (224,224),
batch_size = 32,
class_mode=None)
#use ResNet50 to create the features
features = model.predict_generator(generator, target_image_count // 32)
#features = np.reshape(features,(features.shape[0],features.shape[3]))
#save the features in a numpy binary
np.save(location+'/'+label+'.npy', features)
def create_data_set(data_path,output_folder,save_to_file=True):
#combines all npy files into one large file with their respective labels
#INPUTS:
#a directory containing npy files of all different classes
#Outputs:
#training array and training labels
#label array is returned as a one hot encoding
#label names
from os.path import isdir
from os import listdir
import numpy as np
#find out how many classes
num_classes = 0
label_names = []
if(not isdir(data_path)):
print("Could not find directory: "+ data_path)
return
data_contents = listdir(data_path)
for f in data_contents:
if(f.endswith('.npy')):
num_classes +=1
label_names.append(f.split('.')[0])
if(num_classes==0):
print("Could not find any data files in directory: "+data_path)
return
#generate one-hot label vectors
labels = np.zeros([num_classes,num_classes])
for i in range(0,num_classes):
labels[i][i]=1
#load all arrays into memory.
#In the future, might need to do this on either a high ram machine
#or find another way to concatenate data
arrays = []
sizes = []
for f in data_contents:
if(f.endswith('.npy')):
arr = np.load(data_path+'/'+f)
sizes.append(arr.shape[0])
arrays.append(arr)
X = np.vstack([arr for arr in arrays])
#load the labels into memory
labelcodes = []
for i in range(0,num_classes):
labelcodes.append(np.vstack([labels[i]]*sizes[i]))
y = np.vstack([l for l in labelcodes])
if(save_to_file):
np.save(output_folder+'/data_set.npy',X)
np.save(output_folder+'/label_codes.npy',y)
with open(output_folder+"/labels.txt","w") as output:
output.write("".join([label + '\n' for label in label_names]))
return X,y,label_names
def train_classifier_from_images(train_dir,train_size,val_dir,val_size,output_dir):
#INPUTS:
#train_dir is the directory containing the training images
#val_dir is the directory containing the validation images
#output_dir is the directory to save the trained model
#train_size is the number of images to generate for each training class
#val_size is the number of images to generate for each validation class
#OUTPUTS
#A model that takes as input a 2048-vector of feature maps and outputs
#a prediction of what an image with those features might be.
#The labels file is also placed in this directory
#The model created is an SVM with softmax activation.
from time import time
from keras.applications.resnet50 import ResNet50
from keras.models import Sequential
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.layers import Dense
from sklearn.utils import shuffle
from keras.callbacks import EarlyStopping, ModelCheckpoint
#import ResNet50 without top layer
print("Loading the ResNet50 Network...")
resnet = ResNet50(weights='imagenet',include_top=False,pooling='max')
#create the training and validation datasets for each class
print("Generating Training Set...")
generate_features_from_directory(train_dir,train_size,model=resnet)
print("Generating Testing Set...")
generate_features_from_directory(val_dir,val_size,model=resnet)
#create the combined dataset
print("Combining datasets...")
X_train,y_train,labels = create_data_set(train_dir,output_dir+"/train",save_to_file=True)
X_val,y_val,labels = create_data_set(val_dir,output_dir+"/validation",save_to_file=True)
#shuffle the train data
X_train,y_train = shuffle(X_train,y_train)
num_classes = len(labels)
#create the extension model
print("Creating extension model...")
extModel = Sequential()
extModel.add(Dense(num_classes,input_shape=(2048,), activation='softmax', W_regularizer=l2(0.01)))
extModel.compile(loss='hinge',optimizer=SGD(lr=0.01,momentum=0.9),metrics=["accuracy"])
#callbacks
checkpoint = ModelCheckpoint(output_dir + "/extModel"+str(int(time()))+".h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
with open(output_dir+"/labels.txt","w") as output:
output.write("".join([label + '\n' for label in labels]))
#train model
print("Training...")
extModel.fit(X_train,y_train,
batch_size=32,
epochs=50,
validation_data=(X_val,y_val),
callbacks = [checkpoint,early])
return extModel
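# Illustrative call (hypothetical directories and image counts):
#   ext_model = train_classifier_from_images("data/train", 3200, "data/val", 640, "output")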
def add_to_train(train_dir,image,label, resnet):
#INPUTS
#train_dir - the directory containing all of the npy feature files
#image - the path to the image being added
#resnet - the resnet model to be used for feature determination
#label - the name of the item
#Appends the features of the new item to the training set data for that label
from PIL import Image
from os.path import exists
from keras.preprocessing.image import img_to_array
if(isinstance(image,str)):
if(not exists(image)):
print("Error: Invalid File Path")
return ""
else:
#if its a filepath, convert to PIL image
img = Image.open(image)
shape = resnet.input_shape
imgr = img.resize(shape[1:3])
x = img_to_array(imgr).reshape((1,shape[1],shape[2],shape[3]))
#predict
features = resnet.predict(x)
import numpy as np
npyname = train_dir+'/'+label+'.npy'
if(not exists(npyname)):
np.save(npyname,features)
else:
fullset = np.load(npyname)
newset = np.append(fullset,features,axis=0)
np.save(npyname,newset)
| [((112, 14, 112, 34), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ({}, {}), '()', False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((118, 17, 118, 34), 'os.listdir', 'listdir', ({(118, 25, 118, 33): 'location'}, {}), '(location)', False, 'from os import listdir\n'), ((164, 20, 164, 38), 'os.listdir', 'listdir', ({(164, 28, 164, 37): 'data_path'}, {}), '(data_path)', False, 'from os import listdir\n'), ((175, 13, 175, 48), 'numpy.zeros', 'np.zeros', ({(175, 22, 175, 47): '[num_classes, num_classes]'}, {}), '([num_classes, num_classes])', True, 'import numpy as np\n'), ((190, 8, 190, 42), 'numpy.vstack', 'np.vstack', ({(190, 18, 190, 41): '[arr for arr in arrays]'}, {}), '([arr for arr in arrays])', True, 'import numpy as np\n'), ((196, 8, 196, 42), 'numpy.vstack', 'np.vstack', ({(196, 18, 196, 41): '[l for l in labelcodes]'}, {}), '([l for l in labelcodes])', True, 'import numpy as np\n'), ((232, 13, 232, 73), 'keras.applications.resnet50.ResNet50', 'ResNet50', (), '', False, 'from keras.applications.resnet50 import ResNet50\n'), ((246, 22, 246, 46), 'sklearn.utils.shuffle', 'shuffle', ({(246, 30, 246, 37): 'X_train', (246, 38, 246, 45): 'y_train'}, {}), '(X_train, y_train)', False, 'from sklearn.utils import shuffle\n'), ((252, 15, 252, 27), 'keras.models.Sequential', 'Sequential', ({}, {}), '()', False, 'from keras.models import Sequential\n'), ((258, 12, 258, 94), 'keras.callbacks.EarlyStopping', 'EarlyStopping', (), '', False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((11, 22, 11, 106), 'keras.applications.resnet50.ResNet50', 'ResNet50', (), '', False, 'from keras.applications.resnet50 import ResNet50\n'), ((12, 24, 12, 45), 'keras.models.load_model', 'load_model', ({(12, 35, 12, 44): 'modelpath'}, {}), '(modelpath)', False, 'from keras.models import load_model\n'), ((58, 18, 58, 39), 'numpy.argmax', 'np.argmax', ({(58, 28, 58, 38): 'prediction'}, {}), '(prediction)', True, 'import numpy as np\n'), ((108, 16, 108, 76), 'keras.applications.resnet50.ResNet50', 'ResNet50', (), '', False, 'from keras.applications.resnet50 import ResNet50\n'), ((115, 11, 115, 26), 'os.path.isdir', 'isdir', ({(115, 17, 115, 25): 'location'}, {}), '(location)', False, 'from os.path import isdir\n'), ((138, 8, 138, 52), 'numpy.save', 'np.save', ({(138, 16, 138, 41): "(location + '/' + label + '.npy')", (138, 43, 138, 51): 'features'}, {}), "(location + '/' + label + '.npy', features)", True, 'import numpy as np\n'), ((160, 11, 160, 27), 'os.path.isdir', 'isdir', ({(160, 17, 160, 26): 'data_path'}, {}), '(data_path)', False, 'from os.path import isdir\n'), ((199, 8, 199, 48), 'numpy.save', 'np.save', ({(199, 16, 199, 45): "(output_folder + '/data_set.npy')", (199, 46, 199, 47): 'X'}, {}), "(output_folder + '/data_set.npy', X)", True, 'import numpy as np\n'), ((200, 8, 200, 51), 'numpy.save', 'np.save', ({(200, 16, 200, 48): "(output_folder + '/label_codes.npy')", (200, 49, 200, 50): 'y'}, {}), "(output_folder + '/label_codes.npy', y)", True, 'import numpy as np\n'), ((307, 11, 307, 26), 'os.path.exists', 'exists', ({(307, 18, 307, 25): 'npyname'}, {}), '(npyname)', False, 'from os.path import exists\n'), ((308, 8, 308, 33), 'numpy.save', 'np.save', ({(308, 16, 308, 23): 'npyname', (308, 24, 308, 32): 'features'}, {}), '(npyname, features)', True, 'import numpy as np\n'), ((310, 18, 310, 34), 'numpy.load', 'np.load', ({(310, 26, 310, 33): 'npyname'}, {}), '(npyname)', True, 'import numpy as np\n'), ((311, 17, 311, 51), 'numpy.append', 
'np.append', (), '', True, 'import numpy as np\n'), ((312, 8, 312, 31), 'numpy.save', 'np.save', ({(312, 16, 312, 23): 'npyname', (312, 24, 312, 30): 'newset'}, {}), '(npyname, newset)', True, 'import numpy as np\n'), ((17, 15, 17, 29), 'os.path.exists', 'exists', ({(17, 22, 17, 28): 'labels'}, {}), '(labels)', False, 'from os.path import exists\n'), ((121, 15, 121, 32), 'os.path.isdir', 'isdir', ({(121, 21, 121, 31): 'label_path'}, {}), '(label_path)', False, 'from os.path import isdir\n'), ((186, 18, 186, 42), 'numpy.load', 'np.load', ({(186, 26, 186, 41): "data_path + '/' + f"}, {}), "(data_path + '/' + f)", True, 'import numpy as np\n'), ((195, 26, 195, 57), 'numpy.vstack', 'np.vstack', ({(195, 36, 195, 56): '([labels[i]] * sizes[i])'}, {}), '([labels[i]] * sizes[i])', True, 'import numpy as np\n'), ((254, 44, 254, 69), 'keras.optimizers.SGD', 'SGD', (), '', False, 'from keras.optimizers import SGD\n'), ((286, 15, 286, 28), 'os.path.exists', 'exists', ({(286, 22, 286, 27): 'image'}, {}), '(image)', False, 'from os.path import exists\n'), ((291, 18, 291, 35), 'PIL.Image.open', 'Image.open', ({(291, 29, 291, 34): 'image'}, {}), '(image)', False, 'from PIL import Image\n'), ((296, 8, 296, 26), 'keras.preprocessing.image.img_to_array', 'img_to_array', ({(296, 21, 296, 25): 'imgr'}, {}), '(imgr)', False, 'from keras.preprocessing.image import img_to_array\n'), ((38, 19, 38, 38), 'os.path.exists', 'os.path.exists', ({(38, 34, 38, 37): 'img'}, {}), '(img)', False, 'import os\n'), ((43, 22, 43, 37), 'PIL.Image.open', 'Image.open', ({(43, 33, 43, 36): 'img'}, {}), '(img)', False, 'from PIL import Image\n'), ((50, 12, 50, 30), 'keras.preprocessing.image.img_to_array', 'img_to_array', ({(50, 25, 50, 29): 'imgr'}, {}), '(imgr)', False, 'from keras.preprocessing.image import img_to_array\n'), ((253, 92, 253, 100), 'keras.regularizers.l2', 'l2', ({(253, 95, 253, 99): '(0.01)'}, {}), '(0.01)', False, 'from keras.regularizers import l2\n'), ((257, 66, 257, 72), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n')] |
freehackquest/libfhqcli-py | freehackquest_libclient_py/__init__.py | 382242943047b63861aad0f41bb89c82e755963c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 FreeHackQuest Team <[email protected]>
"""This file was automatically generated by fhq-server
Version: v0.2.47
Date: 2022-01-01 07:15:35
"""
from freehackquest_libclient_py.freehackquest_client import FreeHackQuestClient
| [] |
SpiralDevelopment/crypto-hft-data | exchange_sockets/bitstamp_websocket.py | 205f01fd555eab4f636ffbb701dfcde53d27becc | from exchange_sockets.exchange_websocket import ExchangeWebSocket
from singletones.custom_logger import MyLogger
import websocket
import threading
from time import sleep
from time import time
import json
import ssl
logger = MyLogger()
class BitstampWebsocket(ExchangeWebSocket):
def __init__(self, pairs_n_streams):
super().__init__('Bitstamp', pairs_n_streams)
self.possible_streams = ['live_trades', 'diff_order_book']
self.streams = []
def init_streams(self):
for pair, streams in self.pairs_n_streams.items():
for sub_stream in streams.split(','):
if self.has_stream(sub_stream):
cur = dict()
cur['event'] = 'bts:subscribe'
cur['data'] = {'channel': "{}_{}".format(sub_stream, pair)}
self.streams.append(cur)
def start_multiple_websocket(self, init_streams=True):
super().start_multiple_websocket(init_streams=init_streams)
websocket.enableTrace(True)
self.ws = websocket.WebSocketApp("wss://ws.bitstamp.net",
on_open=self.__on_open,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close)
self.wst = threading.Thread(target=lambda: self.ws.run_forever(sslopt={'cert_reqs': ssl.CERT_NONE}))
self.wst.daemon = True
self.wst.start()
logger.debug("Started thread")
# Wait for connect before continuing
conn_timeout = 15
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout:
sleep(1)
conn_timeout -= 1
if not conn_timeout:
logger.error("%s Couldn't connect to %s! Exiting.",
self.node,
self.exchange)
self.close_socket()
else:
logger.info('{} socket is started:\n{}\n{}'.format(self.exchange,
self.node,
str(self.streams)))
def save_trades(self, message):
data = message['data']
channel = message['channel']
symbol = channel.split('_')[-1]
stream = channel[:-(len(symbol) + 1)]
append_data = "{},{},{},{}\n".format(data['timestamp'],
data['price'],
data['amount'],
data['type'])
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
append_data)
def save_level2_orderbook(self, message):
data = message['data']
channel = message['channel']
symbol = channel.split('_')[-1]
stream = channel[:-(len(symbol) + 1)]
all_data = {}
data_time = data['timestamp']
for side in ['bids', 'asks']:
for cur in data[side]:
if not all_data.get(symbol, None):
all_data[symbol] = []
price = cur[0]
size = cur[1]
all_data[symbol].append("{},{},{}\n".format(
data_time,
price,
size if side == "bids" else "-{}".format(size)))
for symbol, l2_ob_data in all_data.items():
for l2_ob in l2_ob_data:
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
l2_ob)
def __on_message(self, ws, message):
if message is None:
return
try:
self.last_msg_time = int(time())
message = json.loads(message)
channel = message['channel']
if channel.startswith('diff_order_book'):
self.save_level2_orderbook(message)
elif channel.startswith('live_trades'):
self.save_trades(message)
except Exception as e:
logger.debug(str(e))
def __on_error(self, ws, error):
self.on_error = True
logger.error("On error\n{}\n{} {}".format(self.node,
self.exchange,
error))
def __on_close(self, ws):
logger.info("On close\n{}".format(self.exchange))
def __on_open(self, ws):
logger.info("On Open\n{}".format(self.exchange))
if self.streams:
for stream in self.streams:
logger.info('Subscribing to %s', json.dumps(stream))
self.ws.send(json.dumps(stream))
sleep(2)
else:
logger.error('%s. Stream is not initialized', self.exchange)
def close_socket(self):
self.exited = True
if self.ws:
self.ws.close()
| [((10, 9, 10, 19), 'singletones.custom_logger.MyLogger', 'MyLogger', ({}, {}), '()', False, 'from singletones.custom_logger import MyLogger\n'), ((32, 8, 32, 35), 'websocket.enableTrace', 'websocket.enableTrace', ({(32, 30, 32, 34): '(True)'}, {}), '(True)', False, 'import websocket\n'), ((34, 18, 38, 66), 'websocket.WebSocketApp', 'websocket.WebSocketApp', (), '', False, 'import websocket\n'), ((48, 12, 48, 20), 'time.sleep', 'sleep', ({(48, 18, 48, 19): '(1)'}, {}), '(1)', False, 'from time import sleep\n'), ((112, 22, 112, 41), 'json.loads', 'json.loads', ({(112, 33, 112, 40): 'message'}, {}), '(message)', False, 'import json\n'), ((111, 37, 111, 43), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((140, 16, 140, 24), 'time.sleep', 'sleep', ({(140, 22, 140, 23): '(2)'}, {}), '(2)', False, 'from time import sleep\n'), ((138, 49, 138, 67), 'json.dumps', 'json.dumps', ({(138, 60, 138, 66): 'stream'}, {}), '(stream)', False, 'import json\n'), ((139, 29, 139, 47), 'json.dumps', 'json.dumps', ({(139, 40, 139, 46): 'stream'}, {}), '(stream)', False, 'import json\n')] |
katerakelly/pytorch-maml | src/data_loading.py | 75907aca148ad053dfaf75fc138319f0d89534a8 | import numpy as np
import random
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import Sampler
import torchvision.transforms as transforms
from dataset import Omniglot, MNIST
'''
Helpers for loading class-balanced few-shot tasks
from datasets
'''
class ClassBalancedSampler(Sampler):
'''
Samples class-balanced batches from 'num_cl' pools each
of size 'num_inst'
If 'batch_cutoff' is None, indices for iterating over batches
of the entire dataset will be returned
Otherwise, indices for the number of batches up to the batch_cutoff
will be returned
(This is to allow sampling with replacement across training iterations)
'''
def __init__(self, num_cl, num_inst, batch_cutoff=None):
self.num_cl = num_cl
self.num_inst = num_inst
self.batch_cutoff = batch_cutoff
def __iter__(self):
'''return a single list of indices, assuming that items will be grouped by class '''
# First construct batches of 1 instance per class
batches = [[i+j*self.num_inst for i in torch.randperm(self.num_inst)] for j in range(self.num_cl)]
batches = [[batches[j][i] for j in range(self.num_cl)] for i in range(self.num_inst)]
# Shuffle within each batch so that classes don't always appear in same order
for sublist in batches:
random.shuffle(sublist)
if self.batch_cutoff is not None:
random.shuffle(batches)
batches = batches[:self.batch_cutoff]
batches = [item for sublist in batches for item in sublist]
return iter(batches)
def __len__(self):
return 1
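# Illustrative example (a sketch, not part of the original code): with 3 classes
# of 2 instances each, instances are assumed to be stored contiguously by class
# (indices 0-1 for class 0, 2-3 for class 1, 4-5 for class 2), so the flattened
# index list interleaves the classes and every consecutive group of 3 indices
# holds exactly one index per class, e.g. [2, 0, 4, 5, 1, 3].
#
#     sampler = ClassBalancedSampler(num_cl=3, num_inst=2)
#     print(list(iter(sampler)))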
def get_data_loader(task, batch_size=1, split='train'):
# NOTE: batch size here is # instances PER CLASS
if task.dataset == 'mnist':
normalize = transforms.Normalize(mean=[0.13066, 0.13066, 0.13066], std=[0.30131, 0.30131, 0.30131])
dset = MNIST(task, transform=transforms.Compose([transforms.ToTensor(), normalize]), split=split)
else:
normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])
dset = Omniglot(task, transform=transforms.Compose([transforms.ToTensor(), normalize]), split=split)
sampler = ClassBalancedSampler(task.num_cl, task.num_inst, batch_cutoff = (None if split != 'train' else batch_size))
loader = DataLoader(dset, batch_size=batch_size*task.num_cl, sampler=sampler, num_workers=1, pin_memory=True)
return loader
| [((61, 13, 61, 113), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((55, 20, 55, 107), 'torchvision.transforms.Normalize', 'transforms.Normalize', (), '', True, 'import torchvision.transforms as transforms\n'), ((58, 20, 58, 107), 'torchvision.transforms.Normalize', 'transforms.Normalize', (), '', True, 'import torchvision.transforms as transforms\n'), ((39, 11, 39, 34), 'random.shuffle', 'random.shuffle', ({(39, 26, 39, 33): 'sublist'}, {}), '(sublist)', False, 'import random\n'), ((42, 11, 42, 34), 'random.shuffle', 'random.shuffle', ({(42, 26, 42, 33): 'batches'}, {}), '(batches)', False, 'import random\n'), ((35, 46, 35, 75), 'torch.randperm', 'torch.randperm', ({(35, 61, 35, 74): 'self.num_inst'}, {}), '(self.num_inst)', False, 'import torch\n'), ((56, 57, 56, 78), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', True, 'import torchvision.transforms as transforms\n'), ((59, 60, 59, 81), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', True, 'import torchvision.transforms as transforms\n')] |
Tal-Shay-Group/DoChaP | DoChaP-db/UnusedScripts/main.py | e721c6742fdff5f771bb947d92fa6cf66831939a | #!/usr/bin/python
import sys
import os
sys.path.append(os.getcwd())
from Director import Director
from OrthologsBuilder import *
from SpeciesDB import *
if __name__ == "__main__":
inputDict = {}
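    # Expected invocation (illustrative): python main.py download=True withEns=False
    # Leading dashes are stripped, so --download=True is also accepted.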
for inarg in sys.argv[1:]:
try:
splitArg = inarg.strip("-").split("=")
if splitArg[0] in ("download", "withEns"):
inputDict[splitArg[0]] = splitArg[1]
else:
raise ValueError("Wrong input arguments. only accepts arguments 'download' and 'withEns'")
        except (AttributeError, IndexError):
raise ValueError("Make sure that input arguments are argumentName=argumentValue")
species = ['M_musculus', 'H_sapiens', 'R_norvegicus', 'D_rerio', 'X_tropicalis']
download = inputDict['download'] == 'True'
withEns = inputDict['withEns'] == 'True'
print("Running DBbuilder with Download {} and withENS {}".format(download, withEns))
print(type(download))
print(type(withEns))
director = Director()
orthologs = OrthologsBuilder(species=species, download=download)
director.setBuilder(orthologs)
director.collectFromSource(download=download)
spl = len(species)
spnum = 1
for sp in species:
print("===========Current Species: {}===========".format(sp))
dbBuild = dbBuilder(sp, download=download, withEns=withEns)
dbBuild.create_tables_db(merged=False)
dbBuild.fill_in_db(merged=False)
print("Filling {} completed!".format(dbBuild.dbName))
if spnum == 1:
dbBuild.create_tables_db(merged=True)
dbBuild.fill_in_db(merged=True)
if spnum == spl:
dbBuild.create_index()
dbBuild.AddOrthology(orthologs.OrthoTable)
spnum += 1
print("Filling {} completed!".format(dbBuild.dbName))
| [((5, 16, 5, 27), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((27, 15, 27, 25), 'Director.Director', 'Director', ({}, {}), '()', False, 'from Director import Director\n')] |
eigenein/sqlitemap | tests/constants.py | 25846178dee90cfe45a2bc951309301bc7f3694b | # See also: https://stackoverflow.com/questions/3694276/what-are-valid-table-names-in-sqlite
good_table_names = [
'foo',
'123abc',
'123abc.txt',
'123abc-ABC.txt',
'foo""bar',
'😀',
'_sqlite',
]
# See also: https://stackoverflow.com/questions/3694276/what-are-valid-table-names-in-sqlite
bad_table_names = [
'"',
'"foo"',
'sqlite_',
'sqlite_reserved',
]
| [] |
amire80/TWLight | TWLight/settings/base.py | 063a385ea46c61a4889ba88e3fded4183c3a6bd3 | # -*- coding: utf-8 -*-
"""
Base settings for twlight project.
This is not intended to be used as the live settings file for a project and will
not work as one. You should instead use production.py, local.py, heroku.py, or
another file that you write. These files should live in the settings directory;
start with 'from .base import *'; and proceed to add or override settings as
appropriate to their context. In particular, you will need to set ALLOWED_HOSTS
before your app will run.
If you want to use production settings, you are now done. If not, you will also
need to set the environment variables indicated in the README.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import json
from django.contrib import messages
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
# Import available locales from Faker, so we can determine what languages we fake in tests.
from faker.config import AVAILABLE_LOCALES as FAKER_AVAILABLE_LOCALES
# We're going to replace Django's default logging config.
import logging.config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TWLIGHT_HOME = os.path.dirname(
os.path.dirname(os.path.abspath(os.path.join(os.path.abspath(__file__), os.pardir)))
)
TWLIGHT_ENV = os.environ.get("TWLIGHT_ENV")
# An atypical way of setting django languages for TranslateWiki integration:
# https://translatewiki.net/wiki/Thread:Support/_The_following_issue_is_unconfirmed,_still_to_be_investigated._Adding_TheWikipediaLibrary_Card_Platform_TranslateWiki
# Get the language codes from the locale directories, and compare them to the
# languages in Wikimedia CLDR. Use langauge autonyms from Wikimedia.
# We periodically pull:
# https://raw.githubusercontent.com/wikimedia/language-data/master/data/language-data.json
# into locale/language-data.json
def get_languages_from_locale_subdirectories(dir):
current_languages = []
language_data_json = open(os.path.join(dir, "language-data.json"))
languages = json.loads(language_data_json.read())["languages"]
for locale_dir in os.listdir(dir):
if os.path.isdir(os.path.join(dir, locale_dir)):
for lang_code, lang_data in languages.items():
autonym = lang_data[-1]
if locale_dir == lang_code:
current_languages += [(lang_code, autonym)]
return sorted(set(current_languages))
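# Illustrative result (a sketch; the exact list depends on which locale/
# subdirectories exist in the checkout):
#     get_languages_from_locale_subdirectories(LOCALE_PATHS[0])
#     # -> [("en", "English"), ("fr", "français"), ...]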
# Get the intersection of available Faker locales and the specified language set.
def get_django_faker_languages_intersection(languages):
languages_intersection = []
for locale in FAKER_AVAILABLE_LOCALES:
for i, (djlang_code, djlang_name) in enumerate(languages):
# Exclude common English locales from random test selection; English often works while others are broken.
if (
locale == djlang_code
and locale != "en"
and locale != "en_US"
and locale != "en_GB"
):
languages_intersection += [locale]
return sorted(set(languages_intersection))
# ------------------------------------------------------------------------------
# ------------------------> core django configurations <------------------------
# ------------------------------------------------------------------------------
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.admin",
"django.contrib.admindocs",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"whitenoise.runserver_nostatic", # Not a django app; replaces staticfiles
"django.contrib.staticfiles",
"django.contrib.sites", # required by django.contrib.comments
]
THIRD_PARTY_APPS = [
"annoying",
"crispy_forms",
"reversion",
"dal",
"dal_select2",
"django_comments",
"django_cron",
"django_filters",
"modeltranslation",
# DO NOT CONFUSE THIS with requests, the Python URL library! This is
# django-request, the user analytics package.
"request",
"django_countries",
"rest_framework",
"rest_framework.authtoken",
"django_extensions",
]
TWLIGHT_APPS = [
"TWLight.i18n",
"TWLight.users",
"TWLight.resources",
"TWLight.applications",
"TWLight.emails",
"TWLight.graphs",
"TWLight.comments",
"TWLight.api",
"TWLight.ezproxy",
]
# dal (autocomplete_light) and modeltranslation must go before django.contrib.admin.
INSTALLED_APPS = THIRD_PARTY_APPS + DJANGO_APPS + TWLIGHT_APPS
# CRON CONFIGURATION
# ------------------------------------------------------------------------------
CRON_CLASSES = [
"TWLight.crons.BackupCronJob",
"TWLight.crons.SendCoordinatorRemindersCronJob",
"TWLight.crons.UserRenewalNoticeCronJob",
"TWLight.crons.ProxyWaitlistDisableCronJob",
"TWLight.crons.UserUpdateEligibilityCronJob",
"TWLight.crons.ClearSessions",
]
# REST FRAMEWORK CONFIG
# ------------------------------------------------------------------------------
REST_FRAMEWORK = {
"DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.NamespaceVersioning"
}
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
# WhiteNoise should be loaded before everything but security.
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
# LocaleMiddleware must go after Session (and Cache, if used), but before
# Common.
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.admindocs.middleware.XViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
# The default storage backend relies on sessions.
# That’s why SessionMiddleware must be enabled and appear before
# MessageMiddleware.
"django.contrib.messages.middleware.MessageMiddleware",
]
# DEBUG
# ------------------------------------------------------------------------------
# By setting this an an environment variable, it is easy to switch debug on in
# servers to do a quick test.
# DEBUG SHOULD BE FALSE ON PRODUCTION for security reasons.
DEBUG = bool(os.environ.get("DEBUG", "False").lower() == "true")
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# WMF sysadmins strongly prefer mysql, so use that.
# If you're deploying to Heroku, heroku.py will override this.
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": os.environ.get("DJANGO_DB_NAME", None),
"USER": os.environ.get("DJANGO_DB_USER", None),
"PASSWORD": os.environ.get("DJANGO_DB_PASSWORD", None),
"HOST": os.environ.get("DJANGO_DB_HOST", None),
"PORT": "3306",
# This is critical for handling Unicode data due to stupid properties
# of MySQL; see https://stackoverflow.com/questions/2108824/mysql-incorrect-string-value-error-when-save-unicode-string-in-django .
"OPTIONS": {
"charset": "utf8mb4",
"init_command": "SET sql_mode='STRICT_ALL_TABLES'; SET storage_engine='INNODB';",
},
}
}
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")
# In production, this list should contain the URL of the server and nothing
# else, for security reasons. For local testing '*' is OK.
ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS", "localhost 127.0.0.1 [::1]").split(" ")
# Let Django know about external URLs in case they differ from internal
# Needed to be added for /admin
USE_X_FORWARDED_HOST = True
REQUEST_BASE_URL = os.environ.get("REQUEST_BASE_URL", None)
ROOT_URLCONF = "TWLight.urls"
WSGI_APPLICATION = "TWLight.wsgi.application"
SITE_ID = 1
# Overwrite messages.ERROR to use danger instead, to play nice with bootstrap
MESSAGE_TAGS = {messages.ERROR: "danger"}
# INTERNATIONALIZATION CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = "en" # Sets site default language.
# https://django-modeltranslation.readthedocs.io/en/latest/installation.html#advanced-settings
MODELTRANSLATION_DEFAULT_LANGUAGE = (
LANGUAGE_CODE # sets the modeltranslation default language.
)
LOCALE_PATHS = [
# makemessages looks for locale/ in the top level, not the project level.
os.path.join(os.path.dirname(BASE_DIR), "locale")
]
# We're letting the file-based translation contributions dictate the languages
# available to the system. This keeps our column and index count for db-stored
# translations as low as possible while allowing translatewiki contributions to
# be used without reconfiguring the site.
LANGUAGES = get_languages_from_locale_subdirectories(LOCALE_PATHS[0])
FAKER_LOCALES = get_django_faker_languages_intersection(LANGUAGES)
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"OPTIONS": {
# Reiterating the default so we can add to it later.
"context_processors": (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
),
# We cache templates by default.
"loaders": [
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
],
},
}
]
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "collectedstatic")
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA FILE CONFIGURATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/1.8/topics/files/
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media")
MEDIA_URL = "/media/"
# ------------------------------------------------------------------------------
# -----------------> third-party and TWLight configurations <-------------------
# ------------------------------------------------------------------------------
CRISPY_TEMPLATE_PACK = "bootstrap3"
# EZPROXY CONFIGURATION
# ------------------------------------------------------------------------------
TWLIGHT_EZPROXY_URL = os.environ.get("TWLIGHT_EZPROXY_URL", None)
TWLIGHT_EZPROXY_SECRET = os.environ.get("TWLIGHT_EZPROXY_SECRET", None)
# OAUTH CONFIGURATION
# ------------------------------------------------------------------------------
LOGIN_URL = reverse_lazy("oauth_login")
LOGIN_REDIRECT_URL = reverse_lazy("users:home")
AUTHENTICATION_BACKENDS = [
"TWLight.users.oauth.OAuthBackend",
"django.contrib.auth.backends.ModelBackend",
]
TWLIGHT_OAUTH_PROVIDER_URL = os.environ.get("TWLIGHT_OAUTH_PROVIDER_URL", None)
TWLIGHT_OAUTH_CONSUMER_KEY = os.environ.get("TWLIGHT_OAUTH_CONSUMER_KEY", None)
TWLIGHT_OAUTH_CONSUMER_SECRET = os.environ.get("TWLIGHT_OAUTH_CONSUMER_SECRET", None)
# API CONFIGURATION
# ------------------------------------------------------------------------------
TWLIGHT_API_PROVIDER_ENDPOINT = os.environ.get("TWLIGHT_API_PROVIDER_ENDPOINT", None)
# COMMENTS CONFIGURATION
# ------------------------------------------------------------------------------
COMMENTS_APP = "TWLight.comments"
# REVERSION CONFIGURATION
# ------------------------------------------------------------------------------
# See https://django-reversion.readthedocs.org/ .
# We are NOT using reversion middleware, because that creates revisions when
# save() is called in the context of some http requests, but not on all database
# saves. This makes it untestable. Instead we decorate the Application.save().
# DJMAIL CONFIGURATION
# ------------------------------------------------------------------------------
DJMAIL_REAL_BACKEND = os.environ.get(
"DJANGO_EMAIL_BACKEND", "django.core.mail.backends.console.EmailBackend"
)
EMAIL_BACKEND = "djmail.backends.async.EmailBackend"
EMAIL_HOST = os.environ.get("DJANGO_EMAIL_HOST", "localhost")
EMAIL_PORT = 25
EMAIL_HOST_USER = ""
EMAIL_HOST_PASSWORD = ""
EMAIL_USE_TLS = False
INSTALLED_APPS += ["djmail"]
# DJANGO_REQUEST CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE += ["request.middleware.RequestMiddleware"]
# The following are set for privacy purposes. Note that, if some amount of
# geographic tracking is desired, there is a REQUEST_ANONYMOUS_IP setting which
# scrubs the last octet of the IP address, which could be used instead of
# REQUEST_LOG_IP. There is not a way to get semi-granular user tracking (such
# as tracking only authenticated vs anonymous users).
REQUEST_LOG_IP = False
REQUEST_LOG_USER = False
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# We're replacing the default logging config to get better control of the
# mail_admins behavior.
LOGGING_CONFIG = None
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
},
"formatters": {
"django.server": {
"()": "django.utils.log.ServerFormatter",
"format": "[%(server_time)s] %(message)s",
}
},
"handlers": {
"nodebug_console": {
"level": "WARNING",
"filters": ["require_debug_false"],
"class": "logging.StreamHandler",
},
"debug_console": {
"level": "INFO",
"filters": ["require_debug_true"],
"class": "logging.StreamHandler",
},
"django.server": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "django.server",
},
},
"loggers": {
"django": {
"handlers": ["nodebug_console", "debug_console"],
"level": os.environ.get("DJANGO_LOG_LEVEL", "INFO"),
},
"django.server": {
"handlers": ["django.server"],
"level": os.environ.get("DJANGO_LOG_LEVEL", "INFO"),
"propagate": False,
},
"TWLight": {
"handlers": ["nodebug_console", "debug_console"],
"level": os.environ.get("DJANGO_LOG_LEVEL", "INFO"),
},
},
}
)
| [((42, 14, 42, 43), 'os.environ.get', 'os.environ.get', ({(42, 29, 42, 42): '"""TWLIGHT_ENV"""'}, {}), "('TWLIGHT_ENV')", False, 'import os\n'), ((212, 13, 212, 41), 'os.environ.get', 'os.environ.get', ({(212, 28, 212, 40): '"""SECRET_KEY"""'}, {}), "('SECRET_KEY')", False, 'import os\n'), ((222, 19, 222, 59), 'os.environ.get', 'os.environ.get', ({(222, 34, 222, 52): '"""REQUEST_BASE_URL"""', (222, 54, 222, 58): 'None'}, {}), "('REQUEST_BASE_URL', None)", False, 'import os\n'), ((306, 14, 306, 55), 'os.path.join', 'os.path.join', ({(306, 27, 306, 35): 'BASE_DIR', (306, 37, 306, 54): '"""collectedstatic"""'}, {}), "(BASE_DIR, 'collectedstatic')", False, 'import os\n'), ((330, 22, 330, 65), 'os.environ.get', 'os.environ.get', ({(330, 37, 330, 58): '"""TWLIGHT_EZPROXY_URL"""', (330, 60, 330, 64): 'None'}, {}), "('TWLIGHT_EZPROXY_URL', None)", False, 'import os\n'), ((331, 25, 331, 71), 'os.environ.get', 'os.environ.get', ({(331, 40, 331, 64): '"""TWLIGHT_EZPROXY_SECRET"""', (331, 66, 331, 70): 'None'}, {}), "('TWLIGHT_EZPROXY_SECRET', None)", False, 'import os\n'), ((336, 12, 336, 39), 'django.urls.reverse_lazy', 'reverse_lazy', ({(336, 25, 336, 38): '"""oauth_login"""'}, {}), "('oauth_login')", False, 'from django.urls import reverse_lazy\n'), ((337, 21, 337, 47), 'django.urls.reverse_lazy', 'reverse_lazy', ({(337, 34, 337, 46): '"""users:home"""'}, {}), "('users:home')", False, 'from django.urls import reverse_lazy\n'), ((344, 29, 344, 79), 'os.environ.get', 'os.environ.get', ({(344, 44, 344, 72): '"""TWLIGHT_OAUTH_PROVIDER_URL"""', (344, 74, 344, 78): 'None'}, {}), "('TWLIGHT_OAUTH_PROVIDER_URL', None)", False, 'import os\n'), ((346, 29, 346, 79), 'os.environ.get', 'os.environ.get', ({(346, 44, 346, 72): '"""TWLIGHT_OAUTH_CONSUMER_KEY"""', (346, 74, 346, 78): 'None'}, {}), "('TWLIGHT_OAUTH_CONSUMER_KEY', None)", False, 'import os\n'), ((347, 32, 347, 85), 'os.environ.get', 'os.environ.get', ({(347, 47, 347, 78): '"""TWLIGHT_OAUTH_CONSUMER_SECRET"""', (347, 80, 347, 84): 'None'}, {}), "('TWLIGHT_OAUTH_CONSUMER_SECRET', None)", False, 'import os\n'), ((352, 32, 352, 85), 'os.environ.get', 'os.environ.get', ({(352, 47, 352, 78): '"""TWLIGHT_API_PROVIDER_ENDPOINT"""', (352, 80, 352, 84): 'None'}, {}), "('TWLIGHT_API_PROVIDER_ENDPOINT', None)", False, 'import os\n'), ((371, 22, 373, 1), 'os.environ.get', 'os.environ.get', ({(372, 4, 372, 26): '"""DJANGO_EMAIL_BACKEND"""', (372, 28, 372, 76): '"""django.core.mail.backends.console.EmailBackend"""'}, {}), "('DJANGO_EMAIL_BACKEND',\n 'django.core.mail.backends.console.EmailBackend')", False, 'import os\n'), ((375, 13, 375, 61), 'os.environ.get', 'os.environ.get', ({(375, 28, 375, 47): '"""DJANGO_EMAIL_HOST"""', (375, 49, 375, 60): '"""localhost"""'}, {}), "('DJANGO_EMAIL_HOST', 'localhost')", False, 'import os\n'), ((56, 22, 56, 37), 'os.listdir', 'os.listdir', ({(56, 33, 56, 36): 'dir'}, {}), '(dir)', False, 'import os\n'), ((308, 20, 308, 52), 'os.path.join', 'os.path.join', ({(308, 33, 308, 41): 'BASE_DIR', (308, 43, 308, 51): '"""static"""'}, {}), "(BASE_DIR, 'static')", False, 'import os\n'), ((316, 26, 316, 51), 'os.path.dirname', 'os.path.dirname', ({(316, 42, 316, 50): 'BASE_DIR'}, {}), '(BASE_DIR)', False, 'import os\n'), ((36, 43, 36, 68), 'os.path.abspath', 'os.path.abspath', ({(36, 59, 36, 67): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((54, 30, 54, 69), 'os.path.join', 'os.path.join', ({(54, 43, 54, 46): 'dir', (54, 48, 54, 68): '"""language-data.json"""'}, {}), "(dir, 'language-data.json')", False, 'import os\n'), 
((193, 16, 193, 54), 'os.environ.get', 'os.environ.get', ({(193, 31, 193, 47): '"""DJANGO_DB_NAME"""', (193, 49, 193, 53): 'None'}, {}), "('DJANGO_DB_NAME', None)", False, 'import os\n'), ((194, 16, 194, 54), 'os.environ.get', 'os.environ.get', ({(194, 31, 194, 47): '"""DJANGO_DB_USER"""', (194, 49, 194, 53): 'None'}, {}), "('DJANGO_DB_USER', None)", False, 'import os\n'), ((195, 20, 195, 62), 'os.environ.get', 'os.environ.get', ({(195, 35, 195, 55): '"""DJANGO_DB_PASSWORD"""', (195, 57, 195, 61): 'None'}, {}), "('DJANGO_DB_PASSWORD', None)", False, 'import os\n'), ((196, 16, 196, 54), 'os.environ.get', 'os.environ.get', ({(196, 31, 196, 47): '"""DJANGO_DB_HOST"""', (196, 49, 196, 53): 'None'}, {}), "('DJANGO_DB_HOST', None)", False, 'import os\n'), ((216, 16, 216, 76), 'os.environ.get', 'os.environ.get', ({(216, 31, 216, 46): '"""ALLOWED_HOSTS"""', (216, 48, 216, 75): '"""localhost 127.0.0.1 [::1]"""'}, {}), "('ALLOWED_HOSTS', 'localhost 127.0.0.1 [::1]')", False, 'import os\n'), ((249, 17, 249, 42), 'os.path.dirname', 'os.path.dirname', ({(249, 33, 249, 41): 'BASE_DIR'}, {}), '(BASE_DIR)', False, 'import os\n'), ((57, 25, 57, 54), 'os.path.join', 'os.path.join', ({(57, 38, 57, 41): 'dir', (57, 43, 57, 53): 'locale_dir'}, {}), '(dir, locale_dir)', False, 'import os\n'), ((274, 17, 274, 52), 'os.path.join', 'os.path.join', ({(274, 30, 274, 38): 'BASE_DIR', (274, 40, 274, 51): '"""templates"""'}, {}), "(BASE_DIR, 'templates')", False, 'import os\n'), ((39, 49, 39, 74), 'os.path.abspath', 'os.path.abspath', ({(39, 65, 39, 73): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((181, 13, 181, 45), 'os.environ.get', 'os.environ.get', ({(181, 28, 181, 35): '"""DEBUG"""', (181, 37, 181, 44): '"""False"""'}, {}), "('DEBUG', 'False')", False, 'import os\n'), ((438, 25, 438, 67), 'os.environ.get', 'os.environ.get', ({(438, 40, 438, 58): '"""DJANGO_LOG_LEVEL"""', (438, 60, 438, 66): '"""INFO"""'}, {}), "('DJANGO_LOG_LEVEL', 'INFO')", False, 'import os\n'), ((442, 25, 442, 67), 'os.environ.get', 'os.environ.get', ({(442, 40, 442, 58): '"""DJANGO_LOG_LEVEL"""', (442, 60, 442, 66): '"""INFO"""'}, {}), "('DJANGO_LOG_LEVEL', 'INFO')", False, 'import os\n'), ((447, 25, 447, 67), 'os.environ.get', 'os.environ.get', ({(447, 40, 447, 58): '"""DJANGO_LOG_LEVEL"""', (447, 60, 447, 66): '"""INFO"""'}, {}), "('DJANGO_LOG_LEVEL', 'INFO')", False, 'import os\n')] |
informalsystems/modelator-py | modelator_py/util/tla/_optable.py | d66464096c022799e680e6201590a2ead69be32d | """Table of operators."""
# Copyright 2020 by California Institute of Technology
# Copyright (c) 2008-2013 INRIA and Microsoft Corporation
# All rights reserved. Licensed under 3-clause BSD.
#
# This module is based on the file:
#
# <https://github.com/tlaplus/tlapm/blob/main/src/optable.ml>
#
import pprint
from .ast import Nodes as nodes
# open Builtin
# type fixity =
# | Nonfix
# | Prefix | Postfix
# | Infix of assoc
class Fixity:
pass
class Nonfix(Fixity):
pass
class Prefix(Fixity):
pass
class Postfix(Fixity):
pass
class Infix(Fixity):
def __init__(self, assoc):
self.assoc = assoc
# and assoc =
# | Left | Non | Right
class Assoc:
pass
class Left(Assoc):
pass
class Right(Assoc):
pass
class Non(Assoc):
pass
# and dom =
# (* primitive operators *)
# | Logic | Sets | Modal
# (* user-definable operators *)
# | User
dom = {"Logic", "Sets", "Modal", "User"}
# type prec = int * int
class Prec:
def __init__(self, a, b):
self.a = a
self.b = b
# let withdef (name, prec, fix, als, defn) = (
# name, prec, fix, als, Some defn);;
def withdef(tuple_):
name, prec, fix, als, defn = tuple_
return (name, prec, fix, als, defn)
# let tlaops = [
# Logic,
# List.map withdef [
# '=>', ( 1, 1), Infix(Non()), [], Implies ;
# '<=>', ( 2, 2), Infix(Non()), [ '\\equiv' ], Equiv ;
# '/\\', ( 3, 3), Infix(Left()), [ '\\land' ], Conj ;
# '\\/', ( 3, 3), Infix(Left()), [ '\\lor' ], Disj ;
# '~', ( 4, 4), Prefix, [ '\\neg' ; '\\lnot' ], Neg ;
# '=', ( 5, 5), Infix(Non()), [], Eq ;
# '#', ( 5, 5), Infix(Non()), [ '/=' ], Neq ;
# ] ;
# Sets,
# List.map withdef [
# 'SUBSET', ( 8, 8), Prefix, [], SUBSET ;
# 'UNION', ( 8, 8), Prefix, [], UNION ;
# 'DOMAIN', ( 9, 9), Prefix, [], DOMAIN ;
# '\\subseteq', ( 5, 5), Infix(Non()), [], Subseteq ;
# '\\in', ( 5, 5), Infix(Non()), [], Mem ;
# '\\notin', ( 5, 5), Infix(Non()), [], Notmem ;
# '\\', ( 8, 8), Infix(Non()), [], Setminus ;
# '\\cap', ( 8, 8), Infix(Left()), [ '\\intersect' ], Cap ;
# '\\cup', ( 8, 8), Infix(Left()), [ '\\union' ], Cup ;
# ] ;
# Sets,
# [ '\\X', (10,13), Prefix, [ '\\times' ], None ] ;
# Modal,
# List.map withdef [
# ''', (15,15), Postfix, [], Prime ;
# '~>', ( 2, 2), Infix(Non()), [ '\\leadsto' ], Leadsto ;
# 'ENABLED', ( 4,15), Prefix, [], ENABLED ;
# 'UNCHANGED', ( 4,15), Prefix, [], UNCHANGED ;
# '\\cdot', ( 5,14), Infix(Left()), [], Cdot ;
# '-+->', ( 2, 2), Infix(Non()), [], Actplus ;
# '[]', ( 4,15), Prefix, [], Box true ;
# '<>', ( 4,15), Prefix, [], Diamond ;
# ] ;
# User,
# List.map (fun (name, prec, fix, als) -> (name, prec, fix, als, None)) [
# '^', (14,14), Infix(Non()), [] ;
# '/', (13,13), Infix(Non()), [] ;
# '*', (13,13), Infix(Left()), [] ;
# '-.', (12,12), Prefix, [ '-' ] ;
# '-', (11,11), Infix(Left()), [] ;
# '+', (10,10), Infix(Left()), [] ;
# '^+', (15,15), Postfix, [] ;
# '^*', (15,15), Postfix, [] ;
# '^#', (15,15), Postfix, [] ;
# '<', ( 5, 5), Infix(Non()), [] ;
# '=<', ( 5, 5), Infix(Non()), [ '<=' ; '\\leq' ] ;
# '>', ( 5, 5), Infix(Non()), [] ;
# '>=', ( 5, 5), Infix(Non()), [ '\\geq' ] ;
# '...', ( 9, 9), Infix(Non()), [] ;
# '..', ( 9, 9), Infix(Non()), [] ;
# '|', (10,11), Infix(Left()), [] ;
# '||', (10,11), Infix(Left()), [] ;
# '&&', (13,13), Infix(Left()), [] ;
# '&', (13,13), Infix(Left()), [] ;
# '$$', ( 9,13), Infix(Left()), [] ;
# '$', ( 9,13), Infix(Left()), [] ;
# '??', ( 9,13), Infix(Left()), [] ;
# '%%', (10,11), Infix(Left()), [] ;
# '%', (10,11), Infix(Non()), [ '\\mod' ] ;
# '##', ( 9,13), Infix(Left()), [] ;
# '++', (10,10), Infix(Left()), [] ;
# '--', (11,11), Infix(Left()), [] ;
# '**', (13,13), Infix(Left()), [] ;
# '//', (13,13), Infix(Non()), [] ;
# '^^', (14,14), Infix(Non()), [] ;
# '@@', ( 6, 6), Infix(Left()), [] ;
# '!!', ( 9,13), Infix(Non()), [] ;
# '|-', ( 5, 5), Infix(Non()), [] ;
# '|=', ( 5, 5), Infix(Non()), [] ;
# '-|', ( 5, 5), Infix(Non()), [] ;
# '=|', ( 5, 5), Infix(Non()), [] ;
# '<:', ( 7, 7), Infix(Non()), [] ;
# ':>', ( 7, 7), Infix(Non()), [] ;
# ':=', ( 5, 5), Infix(Non()), [] ;
# '::=', ( 5, 5), Infix(Non()), [] ;
# '(+)', (10,10), Infix(Left()), [ '\\oplus' ] ;
# '(-)', (11,11), Infix(Left()), [ '\\ominus' ] ;
# '(.)', (13,13), Infix(Left()), [ '\\odot' ] ;
# '(/)', (13,13), Infix(Non()), [ '\\oslash' ] ;
# '(\\X)', (13,13), Infix(Left()), [ '\\otimes' ] ;
# '\\uplus', ( 9,13), Infix(Left()), [] ;
# '\\sqcap', ( 9,13), Infix(Left()), [] ;
# '\\sqcup', ( 9,13), Infix(Left()), [] ;
# '\\div', (13,13), Infix(Non()), [] ;
# '\\wr', ( 9,14), Infix(Non()), [] ;
# '\\star', (13,13), Infix(Left()), [] ;
# '\\o', (13,13), Infix(Left()), [ '\\circ' ] ;
# '\\bigcirc', (13,13), Infix(Left()), [] ;
# '\\bullet', (13,13), Infix(Left()), [] ;
# '\\prec', ( 5, 5), Infix(Non()), [] ;
# '\\succ', ( 5, 5), Infix(Non()), [] ;
# '\\preceq', ( 5, 5), Infix(Non()), [] ;
# '\\succeq', ( 5, 5), Infix(Non()), [] ;
# '\\sim', ( 5, 5), Infix(Non()), [] ;
# '\\simeq', ( 5, 5), Infix(Non()), [] ;
# '\\ll', ( 5, 5), Infix(Non()), [] ;
# '\\gg', ( 5, 5), Infix(Non()), [] ;
# '\\asymp', ( 5, 5), Infix(Non()), [] ;
# '\\subset', ( 5, 5), Infix(Non()), [] ;
# '\\supset', ( 5, 5), Infix(Non()), [] ;
# '\\supseteq', ( 5, 5), Infix(Non()), [] ;
# '\\approx', ( 5, 5), Infix(Non()), [] ;
# '\\cong', ( 5, 5), Infix(Non()), [] ;
# '\\sqsubset', ( 5, 5), Infix(Non()), [] ;
# '\\sqsubseteq', ( 5, 5), Infix(Non()), [] ;
# '\\sqsupset', ( 5, 5), Infix(Non()), [] ;
# '\\sqsupseteq', ( 5, 5), Infix(Non()), [] ;
# '\\doteq', ( 5, 5), Infix(Non()), [] ;
# '\\propto', ( 5, 5), Infix(Non()), [] ;
# ] ;
# ]
def _generate_tlaops():
tlaops = [
(
"Logic",
[
("=>", (1, 1), Infix(Non()), list(), nodes.Implies()),
("<=>", (2, 2), Infix(Non()), ["\\equiv"], nodes.Equiv()),
("/\\", (3, 3), Infix(Left()), ["\\land"], nodes.Conj()),
("\\/", (3, 3), Infix(Left()), ["\\lor"], nodes.Disj()),
("~", (4, 4), Prefix(), ["\\neg", "\\lnot"], nodes.Neg()),
("=", (5, 5), Infix(Non()), list(), nodes.Eq()),
("#", (5, 5), Infix(Non()), ["/="], nodes.Neq()),
],
),
(
"Sets",
[
("SUBSET", (8, 8), Prefix(), list(), nodes.SUBSET()),
("UNION", (8, 8), Prefix(), list(), nodes.UNION()),
("DOMAIN", (9, 9), Prefix(), list(), nodes.DOMAIN()),
("\\subseteq", (5, 5), Infix(Non()), list(), nodes.Subseteq()),
("\\in", (5, 5), Infix(Non()), list(), nodes.Mem()),
("\\notin", (5, 5), Infix(Non()), [], nodes.Notmem()),
("\\", (8, 8), Infix(Non()), ["\\setminus"], nodes.Setminus()),
("\\cap", (8, 8), Infix(Left()), ["\\intersect"], nodes.Cap()),
("\\cup", (8, 8), Infix(Left()), ["\\union"], nodes.Cup()),
("\\X", (10, 13), Infix(Left()), ["\\times"], None),
],
),
(
"Modal",
[
("'", (15, 15), Postfix(), list(), nodes.Prime()),
("~>", (2, 2), Infix(Non()), ["\\leadsto"], nodes.LeadsTo()),
("ENABLED", (4, 15), Prefix(), list(), nodes.ENABLED()),
("UNCHANGED", (4, 15), Prefix(), list(), nodes.UNCHANGED()),
("\\cdot", (5, 14), Infix(Left()), list(), nodes.Cdot()),
("-+->", (2, 2), Infix(Non()), list(), nodes.WhilePlus()),
("[]", (4, 15), Prefix(), list(), nodes.Box(True)),
("<>", (4, 15), Prefix(), list(), nodes.Diamond()),
],
),
(
"User",
[
(name, prec, fix, als, None)
for name, prec, fix, als in [
("^", (14, 14), Infix(Non()), list()),
("/", (13, 13), Infix(Non()), list()),
("*", (13, 13), Infix(Left()), list()),
("-.", (12, 12), Prefix(), ["-"]),
("-", (11, 11), Infix(Left()), list()),
("+", (10, 10), Infix(Left()), list()),
("^+", (15, 15), Postfix(), list()),
("^*", (15, 15), Postfix(), list()),
("^#", (15, 15), Postfix(), list()),
("<", (5, 5), Infix(Non()), list()),
("=<", (5, 5), Infix(Non()), ["<=", "\\leq"]),
(">", (5, 5), Infix(Non()), list()),
(">=", (5, 5), Infix(Non()), ["\\geq"]),
("...", (9, 9), Infix(Non()), list()),
("..", (9, 9), Infix(Non()), list()),
("|", (10, 11), Infix(Left()), list()),
("||", (10, 11), Infix(Left()), list()),
("&&", (13, 13), Infix(Left()), list()),
("&", (13, 13), Infix(Left()), list()),
("$$", (9, 13), Infix(Left()), list()),
("$", (9, 13), Infix(Left()), list()),
("??", (9, 13), Infix(Left()), list()),
("%%", (10, 11), Infix(Left()), list()),
("%", (10, 11), Infix(Non()), ["\\mod"]),
("##", (9, 13), Infix(Left()), list()),
("++", (10, 10), Infix(Left()), list()),
("--", (11, 11), Infix(Left()), list()),
("**", (13, 13), Infix(Left()), list()),
("//", (13, 13), Infix(Non()), list()),
("^^", (14, 14), Infix(Non()), list()),
("@@", (6, 6), Infix(Left()), list()),
("!!", (9, 13), Infix(Non()), list()),
("|-", (5, 5), Infix(Non()), list()),
("|=", (5, 5), Infix(Non()), list()),
("-|", (5, 5), Infix(Non()), list()),
("=|", (5, 5), Infix(Non()), list()),
("<:", (7, 7), Infix(Non()), list()),
(":>", (7, 7), Infix(Non()), list()),
(":=", (5, 5), Infix(Non()), list()),
("::=", (5, 5), Infix(Non()), list()),
("(+)", (10, 10), Infix(Left()), ["\\oplus"]),
("(-)", (11, 11), Infix(Left()), ["\\ominus"]),
("(.)", (13, 13), Infix(Left()), ["\\odot"]),
("(/)", (13, 13), Infix(Non()), ["\\oslash"]),
("(\\X)", (13, 13), Infix(Left()), ["\\otimes"]),
("\\uplus", (9, 13), Infix(Left()), list()),
("\\sqcap", (9, 13), Infix(Left()), list()),
("\\sqcup", (9, 13), Infix(Left()), list()),
("\\div", (13, 13), Infix(Non()), list()),
("\\wr", (9, 14), Infix(Non()), list()),
("\\star", (13, 13), Infix(Left()), list()),
("\\o", (13, 13), Infix(Left()), ["\\circ"]),
("\\bigcirc", (13, 13), Infix(Left()), list()),
("\\bullet", (13, 13), Infix(Left()), list()),
("\\prec", (5, 5), Infix(Non()), list()),
("\\succ", (5, 5), Infix(Non()), list()),
("\\preceq", (5, 5), Infix(Non()), list()),
("\\succeq", (5, 5), Infix(Non()), list()),
("\\sim", (5, 5), Infix(Non()), list()),
("\\simeq", (5, 5), Infix(Non()), list()),
("\\ll", (5, 5), Infix(Non()), list()),
("\\gg", (5, 5), Infix(Non()), list()),
("\\asymp", (5, 5), Infix(Non()), list()),
("\\subset", (5, 5), Infix(Non()), list()),
("\\supset", (5, 5), Infix(Non()), list()),
("\\supseteq", (5, 5), Infix(Non()), list()),
("\\approx", (5, 5), Infix(Non()), list()),
("\\cong", (5, 5), Infix(Non()), list()),
("\\sqsubset", (5, 5), Infix(Non()), list()),
("\\sqsubseteq", (5, 5), Infix(Non()), list()),
("\\sqsupset", (5, 5), Infix(Non()), list()),
("\\sqsupseteq", (5, 5), Infix(Non()), list()),
("\\doteq", (5, 5), Infix(Non()), list()),
("\\propto", (5, 5), Infix(Non()), list()),
]
],
),
]
return tlaops
# type tlaop = {
# name : string ;
# prec : prec ;
# fix : fixity ;
# dom : dom ;
# defn : Builtin.builtin option ;
# }
class TLAOP:
def __init__(self, name, prec, fixity, dom, defn):
self.name = name # str
self.prec = prec # Prec
self.fix = fixity # Fixity
self.dom = dom
self.defn = defn
def __repr__(self):
return (
f"TLAOP({self.name}, {self.prec}, " f"{self.fix}, {self.dom}, {self.defn})"
)
# let optable =
# let module H = Hashtbl in
# let tab = H.create 109 in
# List.iter begin
# fun (dom, ops) ->
# List.iter begin
# fun (name, prec, fix, als, defn) ->
# let op = { name = name ;
# prec = prec ;
# fix = fix ; dom = dom ;
# defn = defn }
# in
# H.add tab name op ;
# List.iter (fun s -> H.add tab s op) als
# end ops
# end tlaops ;
# tab
def _generate_optable():
tlaops = _generate_tlaops()
optable = dict()
for dom, ops in tlaops:
for name, prec, fixity, alternatives, defn in ops:
op = TLAOP(name, prec, fixity, dom, defn)
optable.setdefault(name, list())
optable[name].append(op)
for s in alternatives:
optable.setdefault(s, list())
optable[s].append(op)
return optable
optable = _generate_optable()
# pprint.pprint(optable)
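# Example lookup (a sketch): every key in optable, including alternative
# spellings, maps to a list of TLAOP records describing that operator.
#
#     op = optable["/\\"][0]           # same record as optable["\\land"][0]
#     print(op.name, op.prec, op.dom)  # -> /\ (3, 3) Logic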
# let nonfix name defn =
# { name = name ; prec = (-1, -1) ;
# fix = Nonfix ; dom = User ; defn = defn }
#
# let lookup name =
# if Hashtbl.mem optable name then
# Hashtbl.find optable name
# else
# nonfix name None
#
# (** Mapping from builtins to standard tlaops *)
# let standard_form b =
# match b with
# | TRUE -> nonfix 'TRUE' (Some TRUE)
# | FALSE -> nonfix 'FALSE' (Some FALSE)
# | Implies -> lookup '=>'
# | Equiv -> lookup '<=>'
# | Conj -> lookup '/\\'
# | Disj -> lookup '\\/'
# | Neg -> lookup '~'
# | Eq -> lookup '='
# | Neq -> lookup '#'
# | Divides ->
# {
# name = '?|';
# prec = (10, 11);
# fix = Infix(Non());
# dom = Logic;
# defn = Some Divides;
# }
#
# | STRING -> nonfix 'STRING' (Some STRING)
# | BOOLEAN -> nonfix 'BOOLEAN' (Some BOOLEAN)
# | SUBSET -> lookup 'SUBSET'
# | UNION -> lookup 'UNION'
# | DOMAIN -> lookup 'DOMAIN'
# | Subseteq -> lookup '\\subseteq'
# | Mem -> lookup '\\in'
# | Notmem -> lookup '\\notin'
# | Setminus -> lookup '\\'
# | Cap -> lookup '\\cap'
# | Cup -> lookup '\\cup'
#
# | Prime -> lookup '''
# | StrongPrime -> lookup '''
# | Leadsto -> lookup '~>'
# | ENABLED -> lookup 'ENABLED'
# | UNCHANGED -> lookup 'UNCHANGED'
# | Cdot -> lookup '\\cdot'
# | Actplus -> lookup '-+->'
# | Box _ -> lookup '[]'
# | Diamond -> lookup '<>'
#
# | Plus -> { (lookup '+') with defn = Some Plus }
# | Minus -> { (lookup '-') with defn = Some Minus }
# | Uminus -> { (lookup '-.') with defn = Some Uminus ; name = '-' }
# | Times -> { (lookup '*') with defn = Some Times }
# | Ratio -> { (lookup '/') with defn = Some Ratio }
# | Quotient -> { (lookup '\\div') with defn = Some Quotient }
# | Remainder -> { (lookup '%') with defn = Some Remainder }
# | Exp -> { (lookup '^') with defn = Some Exp }
# | Lteq -> { (lookup '=<') with defn = Some Lteq }
# | Lt -> { (lookup '<') with defn = Some Lt }
# | Gteq -> { (lookup '>=') with defn = Some Gteq }
# | Gt -> { (lookup '>') with defn = Some Gt }
# | Range -> { (lookup '..') with defn = Some Range }
# | Nat -> nonfix 'Nat' (Some Nat)
# | Int -> nonfix 'Int' (Some Int)
# | Real -> nonfix 'Real' (Some Real)
# | Infinity -> nonfix 'Infinity' (Some Infinity)
#
# | Seq -> nonfix 'Seq' (Some Seq)
# | Len -> nonfix 'Len' (Some Len)
# | BSeq -> nonfix 'BSeq' (Some BSeq)
# | Append -> nonfix 'Append' (Some Append)
# | Cat -> { (lookup '\\o') with defn = Some Cat }
# | Head -> nonfix 'Head' (Some Head)
# | Tail -> nonfix 'Tail' (Some Tail)
# | SubSeq -> nonfix 'SubSeq' (Some SubSeq)
# | SelectSeq -> nonfix 'SelectSeq' (Some SelectSeq)
#
# | OneArg -> { (lookup ':>') with defn = Some OneArg }
# | Extend -> { (lookup '@@') with defn = Some Extend }
# | Print -> nonfix 'Print' (Some Print)
# | PrintT -> nonfix 'PrintT' (Some PrintT)
# | Assert -> nonfix 'Assert' (Some Assert)
# | JavaTime -> nonfix 'JavaTime' (Some JavaTime)
# | TLCGet -> nonfix 'TLCGet' (Some TLCGet)
# | TLCSet -> nonfix 'TLCSet' (Some TLCSet)
# | Permutations -> nonfix 'Permutations' (Some Permutations)
# | SortSeq -> nonfix 'SortSeq' (Some SortSeq)
# | RandomElement -> nonfix 'RandomElement' (Some RandomElement)
# | Any -> nonfix 'Any' (Some Any)
# | ToString -> nonfix 'ToString' (Some ToString)
#
# | Unprimable -> nonfix 'Unprimable' None
# | Irregular -> nonfix 'Irregular' None
# ;;
| [] |
a1ext/DIE | DIE/UI/FunctionViewEx.py | 1a3a19f016f44cf611847ce4f0d126b136040cb6 | import networkx as nx
from awesome.context import ignored
import sark
import idaapi
import idautils
import idc
from idaapi import PluginForm
from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5
if use_qt5:
_QSortFilterProxyModel = QtCore.QSortFilterProxyModel
_MatchRecursive = QtCore.Qt.MatchRecursive
_MatchExactly = QtCore.Qt.MatchExactly
_PositionAtTop = QtWidgets.QAbstractItemView.PositionAtTop
else:
_QSortFilterProxyModel = QtGui.QSortFilterProxyModel
_MatchRecursive = QtCore.Qt.MatchFlag.MatchRecursive
_MatchExactly = QtCore.Qt.MatchFlag.MatchExactly
_PositionAtTop = QtWidgets.QAbstractItemView.ScrollHint.PositionAtTop
import DIE.UI.Die_Icons
import DIE.UI.ValueViewEx
import DIE.UI.ParserView
import DIE.UI.BPView
import DIE.Lib.IDAConnector
import DIE.Lib.DIEDb
import DIE.Lib.BpHandler
import sark.ui
class FunctionView(PluginForm):
"""
DIE Function View
"""
def __init__(self):
super(FunctionView, self).__init__()
self.value_view = None
self.bp_handler = None
self.die_icons = None
self.die_db = None
self.highligthed_items = []
def Show(self):
# Reset highlighted items
self.highligthed_items = []
return PluginForm.Show(self,
"Function View",
options=PluginForm.FORM_PERSIST)
def OnCreate(self, form):
"""
Called when the plugin form is created
"""
self.value_view = DIE.UI.ValueViewEx.get_view()
self.bp_handler = DIE.Lib.BpHandler.get_bp_handler()
self.die_icons = DIE.UI.Die_Icons.get_die_icons()
self.die_db = DIE.Lib.DIEDb.get_db()
# Get parent widget
self.parent = form_to_widget(form)
self.functionModel = QtGui.QStandardItemModel()
self.functionTreeView = QtWidgets.QTreeView()
self.functionTreeView.setExpandsOnDoubleClick(False)
#self.functionTreeView.setSortingEnabled(True)
delegate = TreeViewDelegate(self.functionTreeView)
self.functionTreeView.setItemDelegate(delegate)
self.functionTreeView.doubleClicked.connect(self.itemDoubleClickSlot)
self._model_builder(self.functionModel)
self.functionTreeView.setModel(self.functionModel)
self.functionTreeView.setColumnWidth(0, 200)
self.functionTreeView.setColumnWidth(1, 20)
self.functionTreeView.setColumnWidth(2, 20)
self.functionTreeView.setColumnWidth(3, 20)
self.functionTreeView.setColumnWidth(4, 250)
self.functionTreeView.setColumnWidth(5, 100)
self.functionTreeView.setColumnWidth(6, 20)
self.functionTreeView.setColumnWidth(7, 450)
self.functionTreeView.setColumnWidth(8, 20)
self.functionTreeView.setColumnWidth(9, 450)
# Context menus
self.functionTreeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.functionTreeView.customContextMenuRequested.connect(self.onCustomContextMenu)
# Actions
self.context_menu_param = None # Parameter to be passed to context menu slots
action_exclude_func = QtWidgets.QAction("Exclude Function", self.functionTreeView, triggered=lambda: self.on_exclude_func(self.context_menu_param))
action_exclude_func_adrs = QtWidgets.QAction("Exclude All Function Calls", self.functionTreeView, triggered=lambda: self.on_exclude_func_adrs(self.context_menu_param))
action_exclude_ea = QtWidgets.QAction("Exclude Address", self.functionTreeView, triggered=lambda: self.on_exclude_ea(self.context_menu_param))
action_exclude_library = QtWidgets.QAction("Exclude Library", self.functionTreeView, triggered=lambda: self.on_exclude_library(self.context_menu_param))
action_value_detail = QtWidgets.QAction("Inspect Value Details", self.functionTreeView, triggered=lambda: self.on_value_detail(self.context_menu_param))
action_show_callgraph = QtWidgets.QAction("Show Call-Graph", self.functionTreeView, triggered=lambda: self.on_show_callgraph(self.context_menu_param))
# Function ContextMenu
self.function_context_menu = QtWidgets.QMenu(self.functionTreeView)
self.function_context_menu.addAction(action_exclude_func)
self.function_context_menu.addAction(action_exclude_library)
self.function_context_menu.addAction(action_exclude_func_adrs)
# Function ea ContextMenu
self.ea_context_menu = QtWidgets.QMenu(self.functionTreeView)
self.ea_context_menu.addAction(action_exclude_ea)
self.ea_context_menu.addAction(action_show_callgraph)
# Argument value ContextMenu
self.value_context_menu = QtWidgets.QMenu(self.functionTreeView)
self.value_context_menu.addAction(action_value_detail)
# Therad ComboBox
threads = []
if self.die_db is not None:
threads = self.die_db.get_thread_list()
thread_id_list = []
thread_id_list.append("All Threads")
for thread in threads:
thread_id_list.append(str(thread.thread_num))
self.thread_id_combo = QtWidgets.QComboBox()
self.thread_id_combo.addItems(thread_id_list)
self.thread_id_combo.activated[str].connect(self.on_thread_combobox_change)
self.thread_id_label = QtWidgets.QLabel("Thread: ")
# Toolbar
self.function_toolbar = QtWidgets.QToolBar()
self.function_toolbar.addWidget(self.thread_id_label)
self.function_toolbar.addWidget(self.thread_id_combo)
# Grid
layout = QtWidgets.QGridLayout()
layout.addWidget(self.function_toolbar)
layout.addWidget(self.functionTreeView)
self.parent.setLayout(layout)
def OnClose(self, form):
idaapi.msg("Closed\n")
def isVisible(self):
"""
Is functionview visible
@return: True if visible, otherwise False
"""
try:
return self.functionTreeView.isVisible()
except:
return False
def _model_builder(self, model):
"""
Build the function model.
@param model: QStandardItemModel object
"""
model.clear() # Clear the model
root_node = model.invisibleRootItem()
self._make_model_headers(model)
if self.die_db is None:
return
# Add db functions to the model
for function in self.die_db.get_functions():
item_list_func = self._make_function_item(function)
if function.is_lib_func: # Color library function
for tmp_item in item_list_func:
tmp_item.setBackground(QtGui.QColor(184, 223, 220))
item_function = item_list_func[0]
root_node.appendRow(item_list_func)
# Add function contexts ea\occurrences for the current function
func_context_dict = self.die_db.get_function_context_dict(function)
for function_context_ea in func_context_dict:
function_context_list = func_context_dict[function_context_ea]
if not len(function_context_list) > 0:
continue
item_func_context_list = self._make_function_ea_item(function_context_list[0])
item_func_context_ea = item_func_context_list[0]
item_function.appendRow(item_func_context_list)
occurrence_num = 0
for function_context in function_context_list:
item_func_context_list = self._make_func_occur_item(function_context, occurrence_num)
item_func_context = item_func_context_list[0]
item_func_context_ea.appendRow(item_func_context_list)
self._insert_thread_data(item_function, function_context.thread_id)
self._insert_thread_data(item_func_context_ea, function_context.thread_id)
# Add function arguments to each context
current_call_values = self.die_db.get_call_values(function_context)
current_ret_values = self.die_db.get_return_values(function_context)
curret_ret_arg_value = self.die_db.get_return_arg_value(function_context)
for arg_index in xrange(0, function.arg_num):
try:
current_arg = self.die_db.get_function_arg(function, arg_index)
self._add_model_arg_value(item_func_context,
current_call_values[arg_index],
current_ret_values[arg_index],
current_arg.name,
current_arg.type)
except IndexError:
break
ret_arg = self.die_db.get_function_arg(function, -1)
if ret_arg is None:
ret_arg_type = "VOID"
else:
ret_arg_type = ret_arg.type
# Add return argument
self._add_model_arg_value(item_func_context,
None,
curret_ret_arg_value,
"ret_arg",
ret_arg_type)
# Increment occurrence counter
occurrence_num += 1
# Add non-executed function to the model
# for func_ea in idautils.Functions():
# func_name = DIE.Lib.IDAConnector.get_function_name(func_ea)
#
# if self.die_db.get_function_by_name(func_name) is None:
# item_list_func = self._make_nonexec_function_time(func_name)
#
# if function.is_lib_func: # Color library function
# for tmp_item in item_list_func:
# tmp_item.setBackground(QtGui.QColor(255, 0, 0, 127))
#
# root_node.appendRow(item_list_func)
def _make_model_headers(self, model):
"""
Set the model horizontal header data
@param model: the QStandardItemModel which headers should be set
"""
### Function Header
item_header = QtGui.QStandardItem("Function")
item_header.setToolTip("Function Name")
model.setHorizontalHeaderItem(0, item_header)
### Call number header
item_header = QtGui.QStandardItem("#")
item_header.setToolTip("Number of calls preformed to this function")
model.setHorizontalHeaderItem(1, item_header)
### Indirect Header
item_header = QtGui.QStandardItem("I")
item_header.setToolTip("Indirect Call")
model.setHorizontalHeaderItem(2, item_header)
### Indirect Header
item_header = QtGui.QStandardItem("N")
item_header.setToolTip("New Function")
model.setHorizontalHeaderItem(3, item_header)
### Indirect Header
item_header = QtGui.QStandardItem("Type")
item_header.setToolTip("Argument Type")
model.setHorizontalHeaderItem(4, item_header)
### New Function Header
item_header = QtGui.QStandardItem("Name")
item_header.setToolTip("Argument Name")
model.setHorizontalHeaderItem(5, item_header)
### Call Value Icon Header
item_header = QtGui.QStandardItem("")
model.setHorizontalHeaderItem(6, item_header)
### Call Value Header
item_header = QtGui.QStandardItem("Call Value")
item_header.setToolTip("Argument`s value on function call")
model.setHorizontalHeaderItem(7, item_header)
### Return Value Icon Header
item_header = QtGui.QStandardItem("")
model.setHorizontalHeaderItem(8, item_header)
### Return Value Header
item_header = QtGui.QStandardItem("Return Value")
item_header.setToolTip("Argument`s value on function return")
model.setHorizontalHeaderItem(9, item_header)
def _make_thread_id_data(self, thread_id):
"""
        Delimit thread_id data in order to support filtering/sorting on multi-thread data items
        @param thread_id: thread id to normalize
        @return: a normalized string of the thread_id to be used as data for ThreadId_Role
"""
return "t%st" % str(thread_id)
def _insert_thread_data(self, item, thread_id):
"""
Insert thread_id data into a model item.
The value found in thread_id argument will be delimited by the _make_thread_id_data function
        (e.g. thread_id 123 will become 't123t')
        The delimited value will then be appended to a string of concatenated (unique) child-item thread-ids
        (for example, an item data value can be "t123tt5672tt111112t" for threads 123, 5672 and 111112)
@param item: the model item to add the data to
@param thread_id: thread_id number
@return: True if thread data was successfully added to item, otherwise False
"""
try:
current_thread_id = self._make_thread_id_data(thread_id)
thread_data = item.data(role=DIE.UI.ThreadId_Role)
if thread_data is None:
item.setData(current_thread_id, role=DIE.UI.ThreadId_Role)
elif not current_thread_id in thread_data:
item.setData(thread_data + current_thread_id, role=DIE.UI.ThreadId_Role)
return True
except Exception as ex:
idaapi.msg("Error while inserting thread data: %s\n" %ex)
return False
def _make_function_item(self, function):
"""
Build a tree item for a function name (level-0)
@param function: dbFunction object
        @return: QStandardItemModel item for the function
"""
function_txt = "%s" % function.function_name
item_function = QtGui.QStandardItem(self.die_icons.icon_function, function_txt)
item_function.setData(function, role=DIE.UI.Function_Role)
function_count = self.die_db.count_function_occurs(function)
item_function_count = QtGui.QStandardItem(str(function_count))
item_function_count.setEditable(False)
item_function.setEditable(False)
item_list = [item_function,
item_function_count,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
def _make_nonexec_function_time(self, function_name):
"""
Build a tree item for a function name (for a non-executed function)
@type: String
@param function_name: Function name
@return:
"""
item_function = QtGui.QStandardItem(self.die_icons.icon_function, function_name)
item_function_count = QtGui.QStandardItem("0")
item_function_count.setEditable(False)
item_function.setEditable(False)
item_list = [item_function, item_function_count]
return item_list
def _make_function_ea_item(self, function_context):
"""
Build a tree item for a function_ea node (level-1)
@param function_context: a dbFunction_Context object
        @return: QStandardItemModel item for the function context
"""
calling_function_start = None
with ignored(sark.exceptions.SarkNoFunction):
calling_function_start = sark.Function(function_context.calling_ea).startEA
if calling_function_start is not None:
call_offset = function_context.calling_ea - calling_function_start
func_ea_txt = "%s+%s" % (function_context.calling_func_name, hex(call_offset))
else:
func_ea_txt = "[%s]:%s" % (function_context.calling_func_name, hex(function_context.calling_ea))
item_func_context_ea = QtGui.QStandardItem(func_ea_txt)
item_func_context_ea.setEditable(False)
item_func_context_ea.setData(hex(function_context.calling_ea), role=QtCore.Qt.ToolTipRole)
item_func_context_ea.setData(function_context, role=DIE.UI.FunctionContext_Role)
item_func_context_ea.setData(id(function_context), role=DIE.UI.ContextId_Role) # Used for module look-ups
item_func_is_indirect = QtGui.QStandardItem()
item_func_is_indirect.setEditable(False)
if function_context.is_indirect:
item_func_is_indirect.setIcon(self.die_icons.icon_v)
item_func_is_new = QtGui.QStandardItem()
item_func_is_new.setEditable(False)
if function_context.is_new_func:
item_func_is_new.setIcon(self.die_icons.icon_v)
item_list = [item_func_context_ea,
QtGui.QStandardItem(),
item_func_is_indirect,
item_func_is_new,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
def _make_func_occur_item(self, function_context, occur_num):
"""
Build a tree item for function occurrence (level-2)
@param function_context: a dbFunction_Context object
@param occur_num: occurrence number
        @return: QStandardItemModel item for the function occurrence
"""
func_occur_txt = "Occur %s" % str(occur_num)
item_func_context = QtGui.QStandardItem(func_occur_txt)
item_func_context.setColumnCount(5)
item_func_context.setEditable(False)
item_func_context.setData(function_context, role=DIE.UI.FunctionContext_Role)
item_func_context.setData(id(function_context), role=DIE.UI.ContextId_Role) # Used for module look-ups
item_func_context.setData(self._make_thread_id_data(function_context.thread_id), role=DIE.UI.ThreadId_Role)
item_list = [item_func_context,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
def _add_model_arg_value(self, parent, call_value, ret_value, arg_name, arg_type, nest_depth=0):
"""
Add a debug value
@param parent:
@param call_value:
@param ret_value:
@param arg_name:
@param arg_type:
@return:
"""
arg_count = parent.rowCount()
this_row_item = QtGui.QStandardItem("")
this_row_item.setData(parent.data(role=DIE.UI.ThreadId_Role), role=DIE.UI.ThreadId_Role) # Inherit thread data from parent
# Set indentation for argument types (for nested values)
arg_ident = " " * nest_depth
arg_ident_type = arg_ident + arg_type
item_parsed_val_flag_call = QtGui.QStandardItem()
item_parsed_val_call = QtGui.QStandardItem()
item_parsed_val_flag_ret = QtGui.QStandardItem()
item_parsed_val_ret = QtGui.QStandardItem()
# Get Call Value
if call_value is not None:
parsed_vals = self.die_db.get_parsed_values(call_value)
this_row_item.setData(parsed_vals, role=DIE.UI.CallValue_Role)
if parsed_vals is not None and len(parsed_vals) > 0:
is_guessed, best_val = self.die_db.get_best_parsed_val(parsed_vals)
item_parsed_val_call = QtGui.QStandardItem(best_val.data)
if is_guessed:
item_parsed_val_flag_call.setIcon(self.die_icons.icon_question)
if len(parsed_vals) > 1:  # If more than 1 item, show a combo-box
item_parsed_val_call.setData(parsed_vals, role=DIE.UI.ParsedValuesRole)
item_parsed_val_flag_call.setIcon(self.die_icons.icon_more)
else:
item_parsed_val_call.setData(parsed_vals[0], role=DIE.UI.ParsedValueRole)
else:
parsed_val_data = "NULL"
if call_value.derref_depth == 0:
parsed_val_data = "!MAX_DEREF!"
if call_value.raw_value is not None:
parsed_val_data = hex(call_value.raw_value)
if len(call_value.nested_values) > 0 or call_value.reference_flink is not None:
parsed_val_data = ""
item_parsed_val_call = QtGui.QStandardItem(parsed_val_data)
# Get return value
if ret_value is not None:
parsed_vals = self.die_db.get_parsed_values(ret_value)
this_row_item.setData(parsed_vals, role=DIE.UI.RetValue_Role)
# If len(parsed_vals)>1 create a combobox delegate.
if parsed_vals:
is_guessed, best_val = self.die_db.get_best_parsed_val(parsed_vals)
item_parsed_val_ret = QtGui.QStandardItem(best_val.data)
if is_guessed:
item_parsed_val_flag_ret.setIcon(self.die_icons.icon_question)
if len(parsed_vals) > 1:  # If more than 1 item, show a combo-box
item_parsed_val_ret.setData(parsed_vals, role=DIE.UI.ParsedValuesRole)
item_parsed_val_flag_ret.setIcon(self.die_icons.icon_more)
else:
item_parsed_val_ret.setData(parsed_vals[0], role=DIE.UI.ParsedValueRole)
else:
parsed_val_data = "NULL"
if ret_value.derref_depth == 0:
parsed_val_data = "!MAX_DEREF!"
if ret_value.raw_value is not None:
parsed_val_data = hex(ret_value.raw_value)
if ret_value.nested_values or ret_value.reference_flink is not None:
parsed_val_data = ""
item_parsed_val_ret = QtGui.QStandardItem(parsed_val_data)
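# Lay out the row: columns 4-5 hold the argument type and name, columns 6-7 the call-time value (flag + data), columns 8-9 the return-time value.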
parent.setChild(arg_count, 0, this_row_item)
parent.setChild(arg_count, 1, QtGui.QStandardItem())
parent.setChild(arg_count, 2, QtGui.QStandardItem())
parent.setChild(arg_count, 3, QtGui.QStandardItem())
parent.setChild(arg_count, 4, QtGui.QStandardItem(arg_ident_type))
parent.setChild(arg_count, 5, QtGui.QStandardItem(arg_name))
parent.setChild(arg_count, 6, item_parsed_val_flag_call)
parent.setChild(arg_count, 7, item_parsed_val_call)
parent.setChild(arg_count, 8, item_parsed_val_flag_ret)
parent.setChild(arg_count, 9, item_parsed_val_ret)
# If the current object contains reference values, add them to the model
self._add_model_arg_ref(this_row_item, call_value, ret_value, nest_depth)
# If the current object is a container object, add its members to the model
self._add_model_container_members(this_row_item, call_value, ret_value, nest_depth)
def _add_model_arg_ref(self, parent, call_value, ret_value, nest_depth=0):
"""
Add a reference value to the model
@param parent:
@param call_value:
@param ret_value:
@param nest_depth:
@return:
"""
# If call debug value is a reference
if call_value is not None:
if call_value.reference_flink is not None and not call_value.is_definitely_parsed:
ref_val_call = self.die_db.get_dbg_value(call_value.reference_flink)
ref_val_ret = None
# Try to get the same reference from the return debug value.
if ret_value is not None and ret_value.type == call_value.type:
if ret_value.reference_flink is not None and not ret_value.is_definitely_parsed:
ref_val_ret = self.die_db.get_dbg_value(ret_value.reference_flink)
self._add_model_arg_value(parent, ref_val_call, ref_val_ret, ref_val_call.name, ref_val_call.type, nest_depth+1)
# If return debug value is a reference (and call value is not)
elif ret_value is not None:
if ret_value.reference_flink is not None and not ret_value.is_definitely_parsed:
ref_val = self.die_db.get_dbg_value(ret_value.reference_flink)
self._add_model_arg_value(parent, None, ref_val, ref_val.name, ref_val.type, nest_depth+1)
def _add_model_container_members(self, parent, call_value, ret_value, nest_depth=0):
"""
Add container members to the model
@param parent:
@param call_value:
@param ret_value:
@param nest_depth:
@return:
"""
# If call value is a container type (struct\union\etc)
if call_value is not None and call_value.nested_values is not None:
if call_value.nested_values:
for index in xrange(0, len(call_value.nested_values)):
nested_val_call = self.die_db.get_dbg_value(call_value.nested_values[index])
nested_val_ret = None
# Try to get the same member from the return debug value.
if ret_value is not None and ret_value.type == call_value.type:
if ret_value.nested_values is not None:
if ret_value.nested_values:
nested_val_ret = self.die_db.get_dbg_value(ret_value.nested_values[index])
self._add_model_arg_value(parent, nested_val_call, nested_val_ret, nested_val_call.name, nested_val_call.type, nest_depth+1)
# If return value is a container type (and call value is not)
elif ret_value is not None:
if ret_value.nested_values is not None:
if ret_value.nested_values:
for nested_value in ret_value.nested_values:
nested_val_ret = self.die_db.get_dbg_value(nested_value)
self._add_model_arg_value(parent,
None,
nested_val_ret,
nested_val_ret.name,
nested_val_ret.type,
nest_depth+1)
def reset_function_count(self, thread_id=None):
"""
Reset the function count and set the count according to currently selected thread_id
@param thread_id: currently selected thread_id
"""
root_item = self.functionModel.item(0, 0)
rows = root_item.rowCount()
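# The thread currently selected in the combo box overrides the thread_id argument.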
thread_id = self.thread_id_combo.currentText()
for row in xrange(0, rows):
cur_item = root_item.child(row, 0)
function = cur_item.data(role=DIE.UI.Function_Role)
if function is not None:
count = 0
if thread_id is None:
count = self.die_db.count_function_occurs(function)
else:
count = self.die_db.count_function_occurs(function, int(thread_id))
func_count_item = root_item.child(row, 1)
func_count_item.setText(str(count))
###############################################################################################
# Highlight Items.
def highlight_item(self, item):
"""
Highlight a single item
@param item: module item
"""
try:
item.setBackground(QtGui.QColor('yellow'))
cur_font = item.font()
cur_font.setBold(True)
item.setFont(cur_font)
except Exception as ex:
idaapi.msg("Error while highlighting item: %s\n" %ex)
def highlight_item_row(self, item):
"""
highlight the entire row containing a table item
@param item: table item
"""
try:
if not item.index().isValid():
return
parent = item.parent()
if parent is None:
parent = item
if not parent.hasChildren():
self.highlight_item(parent)
return
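# Highlight every cell in the item's row and keep persistent indexes so clear_highlights() can undo it.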
row = item.row()
column_num = parent.columnCount()
for column in xrange(0, column_num):
if self.functionModel.hasIndex(row, column, parent.index()):
cur_index = self.functionModel.index(row, column, parent.index())
self.highlight_item(self.functionModel.itemFromIndex(cur_index))
persistent_index = QtCore.QPersistentModelIndex(cur_index)
self.highligthed_items.append(persistent_index)
except Exception as ex:
idaapi.msg("Error while highlighting item row: %s\n" % ex)
def clear_highlights(self):
"""
Clear all highlighted items
@return:
"""
try:
self.functionTreeView.collapseAll()
for persistent_index in self.highligthed_items:
if persistent_index.isValid():
item = self.functionModel.itemFromIndex(persistent_index)
item.setBackground(QtGui.QColor('white'))
cur_font = item.font()
cur_font.setBold(False)
item.setFont(cur_font)
self.highligthed_items = []
except Exception as ex:
idaapi.msg("Error while clearing highlights: %s\n" % ex)
###############################################################################################
# Find Items.
def find_function(self, function_name):
"""
Find and highlight a function in current module
@param function_name: Function name
"""
self.clear_highlights()
matched_items = self.functionModel.findItems(function_name)
for item in matched_items:
self.functionTreeView.expand(item.index())
self.functionTreeView.scrollTo(item.index(), _PositionAtTop)
self.highlight_item_row(item)
def find_context_list(self, context_list):
"""
Find and highlight a list of function contexts
@param context_list: list of function contexts (of type dbFunction_Context)
"""
try:
self.clear_highlights()
root_index = self.functionModel.index(0, 0)
if not root_index.isValid():
return
for func_context in context_list:
context_id = id(func_context)
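# Recursively search the model for items whose ContextId role stores this context's id.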
matched_items = self.functionModel.match(root_index, DIE.UI.ContextId_Role, context_id, -1, _MatchRecursive | _MatchExactly)
for index in matched_items:
if not index.isValid():
continue
# Do not highlight "ea root" items, only occurrences of it.
if not index.data().startswith("Occur"):
continue
item = self.functionModel.itemFromIndex(index)
self.functionTreeView.expand(index)
self.functionTreeView.scrollTo(index, _PositionAtTop)
self.highlight_item_row(item)
return True
except Exception as ex:
idaapi.msg("Error while looking up function context in FunctionView: %s\n" % ex)
return False
###############################################################################################
# Slots.
# @QtCore.Slot(QtCore.QModelIndex)
def itemDoubleClickSlot(self, index):
"""
TreeView DoubleClicked Slot.
@param index: QModelIndex object of the clicked tree index item.
@return:
"""
function = index.data(role=DIE.UI.Function_Role)
if function is not None:
ea = function.function_start
if function.is_lib_func:
ea = function.proto_ea
if ea is not None and ea != idc.BADADDR:
idc.Jump(ea)
return True
func_context = index.data(role=DIE.UI.FunctionContext_Role)
if func_context is not None:
ea = func_context.calling_ea
if ea is not None and ea != idc.BADADDR:
idc.Jump(ea)
return True
# @QtCore.Slot(QtCore.QPoint)
def onCustomContextMenu(self, point):
index = self.functionTreeView.indexAt(point)
is_function_item = index.data(role=DIE.UI.Function_Role)
is_func_context_item = index.data(role=DIE.UI.FunctionContext_Role)
is_value_item = index.data(role=DIE.UI.ParsedValueRole)
if is_function_item is not None:
self.context_menu_param = is_function_item
self.function_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
if is_func_context_item is not None:
self.context_menu_param = is_func_context_item
self.ea_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
if is_value_item is not None:
self.context_menu_param = is_value_item
self.value_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
# @QtCore.Slot(str)
def on_exclude_func(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs': %s. excpected dbFunction_Context" % function.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs'")
self.bp_handler.add_bp_funcname_exception(function.function_name)
return
# @QtCore.Slot(str)
def on_exclude_func_adrs(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs': %s. excpected dbFunction_Context" % function.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs'")
func_context_list = self.die_db.get_function_context_list(function)
for func_context in func_context_list:
self.bp_handler.add_bp_ea_exception(func_context.calling_ea)
return
# @QtCore.Slot(str)
def on_exclude_ea(self, function_context):
if not isinstance(function_context, DIE.Lib.DIEDb.dbFunction_Context):
if function_context is not None:
raise ValueError("Wrong value sent to 'on_exclude_ea': %s. excpected dbFunction_Context" % function_context.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_ea'")
self.bp_handler.add_bp_ea_exception(function_context.calling_ea)
return
# @QtCore.Slot(str)
def on_show_callgraph(self, function_context):
if not isinstance(function_context, DIE.Lib.DIEDb.dbFunction_Context):
if function_context is not None:
raise ValueError("Wrong value sent to 'on_show_callgraph': %s. excpected dbFunction_Context" % function_context.__class__)
else:
raise ValueError("Wrong value sent to 'on_show_callgraph'")
graph = nx.DiGraph()
call_graph = self.die_db.get_call_graph_to(function_context)
if not call_graph:
idaapi.msg("No Execution Graph")
return
for ctxt_node in call_graph:
(from_address, to_address) = ctxt_node
graph.add_edge(from_address, to_address)
function_name = self.die_db.get_function_name(function_context.function)
viewer = sark.ui.NXGraph(graph, "Callgraph for {}".format(function_name), handler=sark.ui.AddressNodeHandler())
viewer.Show()
return
# @QtCore.Slot(str)
def on_exclude_library(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs': %s. excpected dbFunction_Context" % function.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs'")
if function.is_lib_func and function.lib_name is not None:
self.bp_handler.add_module_exception(function.lib_name)
return
# @QtCore.Slot(str)
def on_value_detail(self, value):
if not self.value_view.isVisible():
self.value_view.Show()
self.value_view.find_value(value)
return
def on_thread_combobox_change(self, thread_id):
self.reset_function_count(thread_id) # reset function count according to currently selected thread
if thread_id == "All Threads":
if not self.functionTreeView.model() is self.functionModel:
self.functionTreeView.setModel(self.functionModel)
return
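# Otherwise, filter the tree through a proxy model matching the ThreadId role against the selected thread.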
hidden_threads = ".*" + self._make_thread_id_data(thread_id) + ".*"
threadProxyModel = _QSortFilterProxyModel()
threadProxyModel.setFilterRole(DIE.UI.ThreadId_Role)
threadProxyModel.setFilterRegExp(hidden_threads)
threadProxyModel.setSourceModel(self.functionModel)
self.functionTreeView.setModel(threadProxyModel)
def on_valueview_button(self):
value_view = DIE.UI.ValueViewEx.get_view()
value_view.Show()
def on_pluginsview_button(self):
plugins_view = DIE.UI.ParserView.get_view()
plugins_view.Show()
def on_bpview_button(self):
bp_view = DIE.UI.BPView.get_view()
bp_view.Show()
###############################################################################################
# View Delegates.
class TreeViewDelegate(QtWidgets.QStyledItemDelegate):
"""
Delegate for parsed value viewing in the tree view
"""
def __init__(self, parent):
QtWidgets.QStyledItemDelegate.__init__(self, parent)
self.parent = parent
def createEditor(self, parent, option, index):
parsed_val_list = index.data(role=DIE.UI.ParsedValuesRole)
# Show a combobox only if parsed_value has two or more items.
if parsed_val_list is not None and len(parsed_val_list) > 1:
lines = []
for parsed_val in parsed_val_list:
line_txt = "%d, %s, %s" % (parsed_val.score, parsed_val.data, parsed_val.description)
lines.append(line_txt)
combo_box = QtWidgets.QComboBox(parent)
combo_box.addItems(lines)
return combo_box
def setEditorData(self, editor, index):
editor.blockSignals(True)
editor.setCurrentIndex(int(index.model().data(index)))
editor.blockSignals(False)
# Singleton
function_view = None
def initialize():
global function_view
function_view = FunctionView()
def get_view():
return function_view
| [((51, 15, 53, 63), 'idaapi.PluginForm.Show', 'PluginForm.Show', (), '', False, 'from idaapi import PluginForm\n'), ((64, 22, 64, 42), 'sark.qt.form_to_widget', 'form_to_widget', ({(64, 37, 64, 41): 'form'}, {}), '(form)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((66, 29, 66, 55), 'sark.qt.QtGui.QStandardItemModel', 'QtGui.QStandardItemModel', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((67, 32, 67, 53), 'sark.qt.QtWidgets.QTreeView', 'QtWidgets.QTreeView', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((106, 37, 106, 75), 'sark.qt.QtWidgets.QMenu', 'QtWidgets.QMenu', ({(106, 53, 106, 74): 'self.functionTreeView'}, {}), '(self.functionTreeView)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((112, 31, 112, 69), 'sark.qt.QtWidgets.QMenu', 'QtWidgets.QMenu', ({(112, 47, 112, 68): 'self.functionTreeView'}, {}), '(self.functionTreeView)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((117, 34, 117, 72), 'sark.qt.QtWidgets.QMenu', 'QtWidgets.QMenu', ({(117, 50, 117, 71): 'self.functionTreeView'}, {}), '(self.functionTreeView)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((130, 31, 130, 52), 'sark.qt.QtWidgets.QComboBox', 'QtWidgets.QComboBox', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((134, 31, 134, 60), 'sark.qt.QtWidgets.QLabel', 'QtWidgets.QLabel', ({(134, 48, 134, 59): '"""Thread: """'}, {}), "('Thread: ')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((137, 32, 137, 52), 'sark.qt.QtWidgets.QToolBar', 'QtWidgets.QToolBar', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((142, 17, 142, 40), 'sark.qt.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((149, 8, 149, 30), 'idaapi.msg', 'idaapi.msg', ({(149, 19, 149, 29): '"""Closed\n"""'}, {}), "('Closed\\n')", False, 'import idaapi\n'), ((258, 22, 258, 53), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(258, 42, 258, 52): '"""Function"""'}, {}), "('Function')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((263, 22, 263, 46), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(263, 42, 263, 45): '"""#"""'}, {}), "('#')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((268, 22, 268, 46), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(268, 42, 268, 45): '"""I"""'}, {}), "('I')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((273, 22, 273, 46), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(273, 42, 273, 45): '"""N"""'}, {}), "('N')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((278, 22, 278, 49), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(278, 42, 278, 48): '"""Type"""'}, {}), "('Type')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((283, 22, 283, 49), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(283, 42, 283, 48): '"""Name"""'}, {}), "('Name')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((288, 22, 288, 45), 'sark.qt.QtGui.QStandardItem', 
'QtGui.QStandardItem', ({(288, 42, 288, 44): '""""""'}, {}), "('')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((292, 22, 292, 55), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(292, 42, 292, 54): '"""Call Value"""'}, {}), "('Call Value')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((297, 22, 297, 45), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(297, 42, 297, 44): '""""""'}, {}), "('')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((301, 22, 301, 57), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(301, 42, 301, 56): '"""Return Value"""'}, {}), "('Return Value')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((349, 24, 349, 87), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(349, 44, 349, 72): 'self.die_icons.icon_function', (349, 74, 349, 86): 'function_txt'}, {}), '(self.die_icons.icon_function, function_txt)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((379, 24, 379, 88), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(379, 44, 379, 72): 'self.die_icons.icon_function', (379, 74, 379, 87): 'function_name'}, {}), '(self.die_icons.icon_function, function_name)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((380, 30, 380, 54), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(380, 50, 380, 53): '"""0"""'}, {}), "('0')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((405, 31, 405, 63), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(405, 51, 405, 62): 'func_ea_txt'}, {}), '(func_ea_txt)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((411, 32, 411, 53), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((416, 27, 416, 48), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((442, 28, 442, 63), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(442, 48, 442, 62): 'func_occur_txt'}, {}), '(func_occur_txt)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((473, 24, 473, 47), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(473, 44, 473, 46): '""""""'}, {}), "('')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((480, 36, 480, 57), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((481, 31, 481, 52), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((482, 35, 482, 56), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((483, 30, 483, 51), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((877, 16, 877, 28), 'networkx.DiGraph', 'nx.DiGraph', ({}, {}), '()', True, 'import networkx as nx\n'), ((957, 8, 957, 60), 'sark.qt.QtWidgets.QStyledItemDelegate.__init__', 'QtWidgets.QStyledItemDelegate.__init__', ({(957, 47, 957, 51): 
'self', (957, 53, 957, 59): 'parent'}, {}), '(self, parent)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((360, 21, 360, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((361, 21, 361, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((362, 21, 362, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((363, 21, 363, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((364, 21, 364, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((365, 21, 365, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((366, 21, 366, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((367, 21, 367, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((396, 13, 396, 52), 'awesome.context.ignored', 'ignored', ({(396, 21, 396, 51): 'sark.exceptions.SarkNoFunction'}, {}), '(sark.exceptions.SarkNoFunction)', False, 'from awesome.context import ignored\n'), ((422, 21, 422, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((425, 21, 425, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((426, 21, 426, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((427, 21, 427, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((428, 21, 428, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((429, 21, 429, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((430, 21, 430, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((450, 21, 450, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((451, 21, 451, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((452, 21, 452, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((453, 21, 453, 42), 'sark.qt.QtGui.QStandardItem', 
'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((454, 21, 454, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((455, 21, 455, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((456, 21, 456, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((457, 21, 457, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((458, 21, 458, 42), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((881, 12, 881, 44), 'idaapi.msg', 'idaapi.msg', ({(881, 23, 881, 43): '"""No Execution Graph"""'}, {}), "('No Execution Graph')", False, 'import idaapi\n'), ((971, 24, 971, 51), 'sark.qt.QtWidgets.QComboBox', 'QtWidgets.QComboBox', ({(971, 44, 971, 50): 'parent'}, {}), '(parent)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((338, 12, 338, 69), 'idaapi.msg', 'idaapi.msg', ({(338, 23, 338, 68): "('Error while inserting thread data: %s\\n' % ex)"}, {}), "('Error while inserting thread data: %s\\n' % ex)", False, 'import idaapi\n'), ((397, 37, 397, 79), 'sark.Function', 'sark.Function', ({(397, 51, 397, 78): 'function_context.calling_ea'}, {}), '(function_context.calling_ea)', False, 'import sark\n'), ((492, 39, 492, 73), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(492, 59, 492, 72): 'best_val.data'}, {}), '(best_val.data)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((514, 39, 514, 75), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(514, 59, 514, 74): 'parsed_val_data'}, {}), '(parsed_val_data)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((524, 38, 524, 72), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(524, 58, 524, 71): 'best_val.data'}, {}), '(best_val.data)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((545, 38, 545, 74), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(545, 58, 545, 73): 'parsed_val_data'}, {}), '(parsed_val_data)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((548, 42, 548, 63), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((549, 42, 549, 63), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((550, 42, 550, 63), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({}, {}), '()', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((551, 42, 551, 77), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(551, 62, 551, 76): 'arg_ident_type'}, {}), '(arg_ident_type)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((552, 42, 552, 71), 'sark.qt.QtGui.QStandardItem', 'QtGui.QStandardItem', ({(552, 62, 552, 70): 'arg_name'}, {}), '(arg_name)', False, 'from sark.qt import QtGui, QtCore, 
QtWidgets, form_to_widget, use_qt5\n'), ((669, 31, 669, 53), 'sark.qt.QtGui.QColor', 'QtGui.QColor', ({(669, 44, 669, 52): '"""yellow"""'}, {}), "('yellow')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((675, 12, 675, 65), 'idaapi.msg', 'idaapi.msg', ({(675, 23, 675, 64): "('Error while highlighting item: %s\\n' % ex)"}, {}), "('Error while highlighting item: %s\\n' % ex)", False, 'import idaapi\n'), ((707, 12, 707, 70), 'idaapi.msg', 'idaapi.msg', ({(707, 23, 707, 69): "('Error while highlighting item row: %s\\n' % ex)"}, {}), "('Error while highlighting item row: %s\\n' % ex)", False, 'import idaapi\n'), ((729, 12, 729, 68), 'idaapi.msg', 'idaapi.msg', ({(729, 23, 729, 67): "('Error while clearing highlights: %s\\n' % ex)"}, {}), "('Error while clearing highlights: %s\\n' % ex)", False, 'import idaapi\n'), ((779, 12, 779, 92), 'idaapi.msg', 'idaapi.msg', ({(779, 23, 779, 91): "('Error while looking up function context in FunctionView: %s\\n' % ex)"}, {}), "('Error while looking up function context in FunctionView: %s\\n' % ex\n )", False, 'import idaapi\n'), ((800, 16, 800, 28), 'idc.Jump', 'idc.Jump', ({(800, 25, 800, 27): 'ea'}, {}), '(ea)', False, 'import idc\n'), ((807, 16, 807, 28), 'idc.Jump', 'idc.Jump', ({(807, 25, 807, 27): 'ea'}, {}), '(ea)', False, 'import idc\n'), ((889, 90, 889, 118), 'sark.ui.AddressNodeHandler', 'sark.ui.AddressNodeHandler', ({}, {}), '()', False, 'import sark\n'), ((703, 39, 703, 78), 'sark.qt.QtCore.QPersistentModelIndex', 'QtCore.QPersistentModelIndex', ({(703, 68, 703, 77): 'cur_index'}, {}), '(cur_index)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((180, 43, 180, 70), 'sark.qt.QtGui.QColor', 'QtGui.QColor', ({(180, 56, 180, 59): '(184)', (180, 61, 180, 64): '(223)', (180, 66, 180, 69): '(220)'}, {}), '(184, 223, 220)', False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n'), ((721, 39, 721, 60), 'sark.qt.QtGui.QColor', 'QtGui.QColor', ({(721, 52, 721, 59): '"""white"""'}, {}), "('white')", False, 'from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5\n')] |
danerprog/PeerHostedDiscordBot | peerbot/PeerBot.py | 310467d8f123826a20ed92174666beb46fe35d02 | from peerbot.PeerBotStateMachine import PeerBotStateMachine
from utils.Logger import Logger
import discord
class PeerBot(discord.Client):
def __init__(self, args):
self.args = args
self.isBotReady = False
super().__init__()
async def on_ready(self):
stringifiedUserId = str(self.args['userId'])
self.logger = Logger.getLogger("PeerBot - " + stringifiedUserId)
self.logger.trace("on_ready called")
self.stateMachine = PeerBotStateMachine(await self._getStateMachineArgs(self.args))
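# on_message ignores events until the state machine exists and the ready flag is set.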
self.isBotReady = True
self.stateMachine.start()
async def on_message(self, message):
if self.isBotReady:
self.logger.trace("on_message called")
self.stateMachine.execute(message)
async def _getStateMachineArgs(self, args):
return {
'user' : await self.fetch_user(int(args['userId'])),
'protocolChannel' : await self.fetch_channel(int(args['protocolChannelId'])),
'appInfo' : await self.application_info()
} | [((15, 22, 15, 72), 'utils.Logger.Logger.getLogger', 'Logger.getLogger', ({(15, 39, 15, 71): "'PeerBot - ' + stringifiedUserId"}, {}), "('PeerBot - ' + stringifiedUserId)", False, 'from utils.Logger import Logger\n')] |
ynang/airflow-jobs-1 | dags/oss_know/oss_know_dags/dags_github/dag_github_init_issues_timeline.py | 857e9dfbc2444dc1d23dd2b0463fe89108f01b89 | from datetime import datetime
from airflow import DAG
from airflow.operators.python import PythonOperator
# v0.0.1
from oss_know.libs.base_dict.variable_key import NEED_INIT_GITHUB_ISSUES_TIMELINE_REPOS, GITHUB_TOKENS, \
OPENSEARCH_CONN_DATA, PROXY_CONFS
from oss_know.libs.util.proxy import KuaiProxyService, ProxyManager, GithubTokenProxyAccommodator
from oss_know.libs.util.token import TokenManager
with DAG(
dag_id='github_init_issues_timeline_v1',
schedule_interval=None,
start_date=datetime(2000, 1, 1),
catchup=False,
tags=['github'],
) as dag:
def scheduler_init_github_issues_timeline(ds, **kwargs):
return 'End:scheduler_init_github_issues_timeline'
op_scheduler_init_github_issues_timeline = PythonOperator(
task_id='op_scheduler_init_github_issues_timeline',
python_callable=scheduler_init_github_issues_timeline
)
def do_init_github_issues_timeline(params):
from airflow.models import Variable
from oss_know.libs.github import init_issues_timeline
github_tokens = Variable.get(GITHUB_TOKENS, deserialize_json=True)
opensearch_conn_info = Variable.get(OPENSEARCH_CONN_DATA, deserialize_json=True)
proxy_confs = Variable.get(PROXY_CONFS, deserialize_json=True)
proxies = []
for line in proxy_confs['reserved_proxies']:
proxies.append(f'http://{line}')
proxy_service = KuaiProxyService(proxy_confs['api_url'], proxy_confs['orderid'])
proxy_manager = ProxyManager(proxies, proxy_service)
token_manager = TokenManager(github_tokens)
proxy_accommodator = GithubTokenProxyAccommodator(token_manager, proxy_manager, shuffle=True,
policy=GithubTokenProxyAccommodator.POLICY_FIXED_MAP)
owner = params["owner"]
repo = params["repo"]
# since = params["since"]
since = None
init_issues_timeline.init_sync_github_issues_timeline(opensearch_conn_info, owner, repo,
proxy_accommodator, since)
return params
need_do_init_ops = []
from airflow.models import Variable
need_init_github_issues_timeline_repos = Variable.get(NEED_INIT_GITHUB_ISSUES_TIMELINE_REPOS,
deserialize_json=True)
for need_init_github_issues_timeline_repo in need_init_github_issues_timeline_repos:
op_do_init_github_issues_timeline = PythonOperator(
task_id='op_do_init_github_issues_timeline_{owner}_{repo}'.format(
owner=need_init_github_issues_timeline_repo["owner"],
repo=need_init_github_issues_timeline_repo["repo"]),
python_callable=do_init_github_issues_timeline,
op_kwargs={'params': need_init_github_issues_timeline_repo},
)
op_scheduler_init_github_issues_timeline >> op_do_init_github_issues_timeline
| [((23, 47, 26, 5), 'airflow.operators.python.PythonOperator', 'PythonOperator', (), '', False, 'from airflow.operators.python import PythonOperator\n'), ((63, 45, 64, 80), 'airflow.models.Variable.get', 'Variable.get', (), '', False, 'from airflow.models import Variable\n'), ((33, 24, 33, 74), 'airflow.models.Variable.get', 'Variable.get', (), '', False, 'from airflow.models import Variable\n'), ((34, 31, 34, 88), 'airflow.models.Variable.get', 'Variable.get', (), '', False, 'from airflow.models import Variable\n'), ((36, 22, 36, 70), 'airflow.models.Variable.get', 'Variable.get', (), '', False, 'from airflow.models import Variable\n'), ((41, 24, 41, 88), 'oss_know.libs.util.proxy.KuaiProxyService', 'KuaiProxyService', ({(41, 41, 41, 63): "proxy_confs['api_url']", (41, 65, 41, 87): "proxy_confs['orderid']"}, {}), "(proxy_confs['api_url'], proxy_confs['orderid'])", False, 'from oss_know.libs.util.proxy import KuaiProxyService, ProxyManager, GithubTokenProxyAccommodator\n'), ((42, 24, 42, 60), 'oss_know.libs.util.proxy.ProxyManager', 'ProxyManager', ({(42, 37, 42, 44): 'proxies', (42, 46, 42, 59): 'proxy_service'}, {}), '(proxies, proxy_service)', False, 'from oss_know.libs.util.proxy import KuaiProxyService, ProxyManager, GithubTokenProxyAccommodator\n'), ((43, 24, 43, 51), 'oss_know.libs.util.token.TokenManager', 'TokenManager', ({(43, 37, 43, 50): 'github_tokens'}, {}), '(github_tokens)', False, 'from oss_know.libs.util.token import TokenManager\n'), ((45, 29, 46, 111), 'oss_know.libs.util.proxy.GithubTokenProxyAccommodator', 'GithubTokenProxyAccommodator', (), '', False, 'from oss_know.libs.util.proxy import KuaiProxyService, ProxyManager, GithubTokenProxyAccommodator\n'), ((53, 8, 54, 88), 'oss_know.libs.github.init_issues_timeline.init_sync_github_issues_timeline', 'init_issues_timeline.init_sync_github_issues_timeline', ({(53, 62, 53, 82): 'opensearch_conn_info', (53, 84, 53, 89): 'owner', (53, 91, 53, 95): 'repo', (54, 62, 54, 80): 'proxy_accommodator', (54, 82, 54, 87): 'since'}, {}), '(opensearch_conn_info,\n owner, repo, proxy_accommodator, since)', False, 'from oss_know.libs.github import init_issues_timeline\n'), ((15, 19, 15, 39), 'datetime.datetime', 'datetime', ({(15, 28, 15, 32): '(2000)', (15, 34, 15, 35): '(1)', (15, 37, 15, 38): '(1)'}, {}), '(2000, 1, 1)', False, 'from datetime import datetime\n')] |
laundry-96/conan | conans/conan.py | fd938f7220ca042d94c42ec5eb607ee69c6785a3 | import sys
from conans.client.command import main
def run():
main(sys.argv[1:])
if __name__ == '__main__':
run()
| [((7, 4, 7, 22), 'conans.client.command.main', 'main', ({(7, 9, 7, 21): 'sys.argv[1:]'}, {}), '(sys.argv[1:])', False, 'from conans.client.command import main\n')] |
lunika/richie | tests/apps/persons/test_cms_plugins_person.py | b0b04d0ffc0b16f2f1b8a8201418b8f86941e45f | # -*- coding: utf-8 -*-
"""
Unit tests for the Person plugin and its model
"""
from django import forms
from django.conf import settings
from django.test import TestCase
from cms.api import add_plugin, create_page
from cmsplugin_plain_text.cms_plugins import PlaintextPlugin
from djangocms_picture.cms_plugins import PicturePlugin
from richie.apps.core.factories import FilerImageFactory, UserFactory
from richie.apps.core.helpers import create_i18n_page
from richie.apps.persons.cms_plugins import PersonPlugin
from richie.apps.persons.factories import PersonFactory
from richie.apps.persons.models import PersonPluginModel
class PersonPluginTestCase(TestCase):
"""
Test that PersonPlugin correctly displays a Person's page placeholders content
"""
def test_cms_plugins_person_form_page_choices(self):
"""
The form to create a person plugin should only list person pages in the select box.
"""
class PersonPluginModelForm(forms.ModelForm):
"""A form for testing the choices in the select box"""
class Meta:
model = PersonPluginModel
exclude = ()
person = PersonFactory()
other_page_title = "other page"
create_page(other_page_title, "richie/fullwidth.html", settings.LANGUAGE_CODE)
plugin_form = PersonPluginModelForm()
self.assertIn(person.get_full_name(), plugin_form.as_table())
self.assertNotIn(other_page_title, plugin_form.as_table())
def test_cms_plugins_person_render(self):
"""
Test that a PersonPlugin correctly renders person's page specific information
"""
# Create a filer fake image
staff = UserFactory(is_staff=True, is_superuser=True)
image = FilerImageFactory(owner=staff)
# Create a Person
person = PersonFactory()
person_page = person.extended_object
# Add portrait to related placeholder
portrait_placeholder = person_page.placeholders.get(slot="portrait")
add_plugin(
portrait_placeholder,
PicturePlugin,
"en",
**{"picture": image, "attributes": {"alt": "portrait description"}}
)
add_plugin(
portrait_placeholder,
PicturePlugin,
"fr",
**{"picture": image, "attributes": {"alt": "description du portrait"}}
)
# A resume to related placeholder
resume_placeholder = person_page.placeholders.get(slot="resume")
add_plugin(
resume_placeholder, PlaintextPlugin, "en", **{"body": "A short resume"}
)
add_plugin(
resume_placeholder, PlaintextPlugin, "fr", **{"body": "Un résumé court"}
)
# Create a page to add the plugin to
page = create_i18n_page({"en": "A page", "fr": "Une page"})
placeholder = page.placeholders.get(slot="maincontent")
add_plugin(placeholder, PersonPlugin, "en", **{"person": person})
add_plugin(placeholder, PersonPlugin, "fr", **{"person": person})
page.publish("en")
page.publish("fr")
# Check the page content in English
url = page.get_absolute_url(language="en")
response = self.client.get(url)
# Person's name should be present as a link to the cms page
# And CMS page title should be in title attribute of the link
self.assertContains(
response,
'<a href="{url}" title="{page_title}">'.format(
url=person_page.get_absolute_url(), page_title=person_page.get_title()
),
status_code=200,
)
self.assertContains(response, person.get_full_name(), html=True)
# Person's portrait and its properties should be present
# pylint: disable=no-member
self.assertContains(response, image.file.name)
# Short resume should be present
self.assertContains(
response,
'<div class="person-plugin__content__text">A short resume</div>',
html=True,
)
# The person's full name should be wrapped in a h2
self.assertContains(
response,
'<h2 class="person-plugin__content__title">{:s}</h2>'.format(
person.get_full_name()
),
html=True,
)
# Same checks in French
url = page.get_absolute_url(language="fr")
response = self.client.get(url)
self.assertContains(
response,
'<a href="{url}" title="{page_title}">'.format(
url=person_page.get_absolute_url(), page_title=person_page.get_title()
),
status_code=200,
)
# pylint: disable=no-member
self.assertContains(response, image.file.name)
self.assertContains(
response,
'<div class="person-plugin__content__text">Un résumé court</div>',
html=True,
)
| [((37, 17, 37, 32), 'richie.apps.persons.factories.PersonFactory', 'PersonFactory', ({}, {}), '()', False, 'from richie.apps.persons.factories import PersonFactory\n'), ((39, 8, 39, 86), 'cms.api.create_page', 'create_page', ({(39, 20, 39, 36): 'other_page_title', (39, 38, 39, 61): '"""richie/fullwidth.html"""', (39, 63, 39, 85): 'settings.LANGUAGE_CODE'}, {}), "(other_page_title, 'richie/fullwidth.html', settings.LANGUAGE_CODE)", False, 'from cms.api import add_plugin, create_page\n'), ((49, 16, 49, 61), 'richie.apps.core.factories.UserFactory', 'UserFactory', (), '', False, 'from richie.apps.core.factories import FilerImageFactory, UserFactory\n'), ((50, 16, 50, 46), 'richie.apps.core.factories.FilerImageFactory', 'FilerImageFactory', (), '', False, 'from richie.apps.core.factories import FilerImageFactory, UserFactory\n'), ((53, 17, 53, 32), 'richie.apps.persons.factories.PersonFactory', 'PersonFactory', ({}, {}), '()', False, 'from richie.apps.persons.factories import PersonFactory\n'), ((58, 8, 63, 9), 'cms.api.add_plugin', 'add_plugin', ({(59, 12, 59, 32): 'portrait_placeholder', (60, 12, 60, 25): 'PicturePlugin', (61, 12, 61, 16): '"""en"""'}, {}), "(portrait_placeholder, PicturePlugin, 'en', **{'picture': image,\n 'attributes': {'alt': 'portrait description'}})", False, 'from cms.api import add_plugin, create_page\n'), ((64, 8, 69, 9), 'cms.api.add_plugin', 'add_plugin', ({(65, 12, 65, 32): 'portrait_placeholder', (66, 12, 66, 25): 'PicturePlugin', (67, 12, 67, 16): '"""fr"""'}, {}), "(portrait_placeholder, PicturePlugin, 'fr', **{'picture': image,\n 'attributes': {'alt': 'description du portrait'}})", False, 'from cms.api import add_plugin, create_page\n'), ((72, 8, 74, 9), 'cms.api.add_plugin', 'add_plugin', ({(73, 12, 73, 30): 'resume_placeholder', (73, 32, 73, 47): 'PlaintextPlugin', (73, 49, 73, 53): '"""en"""'}, {}), "(resume_placeholder, PlaintextPlugin, 'en', **{'body':\n 'A short resume'})", False, 'from cms.api import add_plugin, create_page\n'), ((75, 8, 77, 9), 'cms.api.add_plugin', 'add_plugin', ({(76, 12, 76, 30): 'resume_placeholder', (76, 32, 76, 47): 'PlaintextPlugin', (76, 49, 76, 53): '"""fr"""'}, {}), "(resume_placeholder, PlaintextPlugin, 'fr', **{'body':\n 'Un résumé court'})", False, 'from cms.api import add_plugin, create_page\n'), ((80, 15, 80, 67), 'richie.apps.core.helpers.create_i18n_page', 'create_i18n_page', ({(80, 32, 80, 66): "{'en': 'A page', 'fr': 'Une page'}"}, {}), "({'en': 'A page', 'fr': 'Une page'})", False, 'from richie.apps.core.helpers import create_i18n_page\n'), ((82, 8, 82, 73), 'cms.api.add_plugin', 'add_plugin', ({(82, 19, 82, 30): 'placeholder', (82, 32, 82, 44): 'PersonPlugin', (82, 46, 82, 50): '"""en"""'}, {}), "(placeholder, PersonPlugin, 'en', **{'person': person})", False, 'from cms.api import add_plugin, create_page\n'), ((83, 8, 83, 73), 'cms.api.add_plugin', 'add_plugin', ({(83, 19, 83, 30): 'placeholder', (83, 32, 83, 44): 'PersonPlugin', (83, 46, 83, 50): '"""fr"""'}, {}), "(placeholder, PersonPlugin, 'fr', **{'person': person})", False, 'from cms.api import add_plugin, create_page\n')] |
Mathics3/mathics-core | mathics/core/subexpression.py | 54dc3c00a42cd893c6430054e125291b6eb55ead | # cython: language_level=3
# -*- coding: utf-8 -*-
from mathics.core.expression import Expression
from mathics.core.symbols import Atom, Symbol
from mathics.core.atoms import Integer
from mathics.builtin.base import MessageException
"""
This module provides some infrastructure to deal with SubExpressions.
"""
def _pspec_span_to_tuple(pspec, expr):
"""
This function takes an expression and a Mathics
`Span` Expression and returns a tuple with the positions
of the leaves.
"""
start = 1
stop = None
step = 1
leaves = pspec.leaves
if len(leaves) > 3:
raise MessageException("Part", "span", leaves)
if len(leaves) > 0:
start = leaves[0].get_int_value()
if len(leaves) > 1:
stop = leaves[1].get_int_value()
if stop is None:
if leaves[1].get_name() == "System`All":
stop = None
else:
raise MessageException("Part", "span", pspec)
else:
stop = stop - 1 if stop > 0 else len(expr.leaves) + stop
if len(pspec.leaves) > 2:
step = leaves[2].get_int_value()
if start is None or step is None:
raise MessageException("Part", "span", pspec)
if start == 0 or stop == 0:
# index 0 is undefined
raise MessageException("Part", "span", Integer(0))
if start < 0:
start = len(expr.leaves) - start
else:
start = start - 1
if stop is None:
stop = 0 if step < 0 else len(expr.leaves) - 1
stop = stop + 1 if step > 0 else stop - 1
return tuple(k for k in range(start, stop, step))
class ExpressionPointer(object):
"""
This class represents a reference to a leaf in an expression.
Supports a minimal part of the basic interface of `mathics.core.symbols.BaseElement`.
"""
def __init__(self, expr, pos=None):
"""
Initializes an ExpressionPointer pointing to the leaf in position `pos`
of `expr`.
expr: can be an Expression, a Symbol, or another ExpressionPointer
pos: int or None
If `pos==0`, then the pointer points to the `head` of the expression.
If `pos` is `None`, it points to the whole expression.
"""
if pos is None:
if type(expr) is ExpressionPointer:
self.parent = expr.parent
self.position = expr.position
else:
self.parent = expr
self.position = None
else:
self.parent = expr
self.position = pos
def __str__(self) -> str:
return "%s[[%s]]" % (self.parent, self.position)
def __repr__(self) -> str:
return self.__str__()
@property
def original(self):
return None
@original.setter
def original(self, value):
raise ValueError("Expression.original is write protected.")
@property
def head(self):
pos = self.position
if pos is None:
return self.parent.head
elif pos == 0:
return self.parent.head.head
return self.parent.leaves[pos - 1].head
@head.setter
def head(self, value):
raise ValueError("ExpressionPointer.head is write protected.")
@property
def leaves(self):
pos = self.position
if pos is None:
return self.parent.leaves
elif pos == 0:
return self.parent.head.leaves
return self.parent.leaves[pos - 1].leaves
@leaves.setter
def leaves(self, value):
raise ValueError("ExpressionPointer.leaves is write protected.")
def get_head_name(self):
return self.head.get_name()
def is_atom(self):
pos = self.position
if pos is None:
return self.parent.is_atom()
elif pos == 0:
return self.parent.head.is_atom()
return self.parent.leaves[pos - 1].is_atom()
def to_expression(self):
parent = self.parent
p = self.position
if p == 0:
if isinstance(parent, Symbol):
return parent
else:
return parent.head.copy()
else:
leaf = self.parent.leaves[p - 1]
if isinstance(leaf, Atom):
return leaf
else:
return leaf.copy()
def replace(self, new):
"""
This method replaces the value pointed out by a `new` value.
"""
# First, look for the ancestor that is not an ExpressionPointer,
# keeping the positions of each step:
parent = self.parent
pos = [self.position]
while type(parent) is ExpressionPointer:
position = parent.position
if position is None:
parent = parent.parent
continue
pos.append(parent.position)
parent = parent.parent
# At this point, we hit the expression, and we have
# the path to reach the position
i = pos.pop()
try:
while pos:
if i == 0:
parent = parent._head
else:
parent = parent.elements[i - 1]
i = pos.pop()
except Exception:
raise MessageException("Part", "span", pos)
# Now, we have a pointer to an element in a true `Expression`.
# Now, set it to the new value.
if i == 0:
parent.set_head(new)
else:
parent.set_element(i - 1, new)
class SubExpression(object):
"""
This class represents a Subexpression of an existing Expression.
Assignment to a subexpression results in the change of the original Expression.
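For example, `SubExpression(expr, pos).replace(new)` modifies `expr` in place instead of returning a changed copy.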
"""
def __new__(cls, expr, pos=None):
"""
`expr` can be an `Expression`, an `ExpressionPointer` or
another `SubExpression`
`pos` can be `None`, an integer value or an `Expression` that
indicates a subset of leaves in the original `Expression`.
If `pos` points to a single whole leaf of `expr`, then
returns an `ExpressionPointer`.
"""
# If pos is a list, take the first element, and
# store the remainder.
if type(pos) in (tuple, list):
pos, rem_pos = pos[0], pos[1:]
if len(rem_pos) == 0:
rem_pos = None
else:
rem_pos = None
# Trivial conversion: if pos is an `Integer`, convert
# to a Python native int
if type(pos) is Integer:
pos = pos.get_int_value()
# pos == `System`All`
elif isinstance(pos, Symbol) and pos.get_name() == "System`All":
pos = None
elif type(pos) is Expression:
if pos.has_form("System`List", None):
tuple_pos = [i.get_int_value() for i in pos.leaves]
if any([i is None for i in tuple_pos]):
raise MessageException("Part", "pspec", pos)
pos = tuple_pos
elif pos.has_form("System`Span", None):
pos = _pspec_span_to_tuple(pos, expr)
else:
raise MessageException("Part", "pspec", pos)
if pos is None or type(pos) is int:
if rem_pos is None:
return ExpressionPointer(expr, pos)
else:
return SubExpression(ExpressionPointer(expr, pos), rem_pos)
elif type(pos) is tuple:
self = super(SubExpression, cls).__new__(cls)
self._headp = ExpressionPointer(expr.head, 0)
self._elementsp = [
SubExpression(ExpressionPointer(expr, k + 1), rem_pos) for k in pos
]
return self
def is_atom(self):
return False
def __str__(self):
return (
self.head.__str__()
+ "[\n"
+ ",\n".join(["\t " + leaf.__str__() for leaf in self.leaves])
+ "\n\t]"
)
def __repr__(self):
return self.__str__()
@property
def head(self):
return self._headp
@head.setter
def head(self, value):
raise ValueError("SubExpression.head is write protected.")
def get_head_name(self):
return self._headp.parent.get_head_name()
@property
def elements(self):
return self._elementsp
@elements.setter
def elements(self, value):
raise ValueError("SubExpression.leaves is write protected.")
@property
def leaves(self):
return self._elementsp
@leaves.setter
def leaves(self, value):
raise ValueError("SubExpression.leaves is write protected.")
def to_expression(self):
return Expression(
self._headp.to_expression(),
*(leaf.to_expression() for leaf in self._elementsp)
)
def replace(self, new):
"""
Assigns `new` to the subexpression, according to the logic of `mathics.core.walk_parts`
"""
if (new.has_form("List", None) or new.get_head_name() == "System`List") and len(
new.leaves
) == len(self._elementsp):
for leaf, sub_new in zip(self._elementsp, new.leaves):
leaf.replace(sub_new)
else:
for leaf in self._elementsp:
leaf.replace(new)
| [((27, 14, 27, 54), 'mathics.builtin.base.MessageException', 'MessageException', ({(27, 31, 27, 37): '"""Part"""', (27, 39, 27, 45): '"""span"""', (27, 47, 27, 53): 'leaves'}, {}), "('Part', 'span', leaves)", False, 'from mathics.builtin.base import MessageException\n'), ((44, 14, 44, 53), 'mathics.builtin.base.MessageException', 'MessageException', ({(44, 31, 44, 37): '"""Part"""', (44, 39, 44, 45): '"""span"""', (44, 47, 44, 52): 'pspec'}, {}), "('Part', 'span', pspec)", False, 'from mathics.builtin.base import MessageException\n'), ((48, 47, 48, 57), 'mathics.core.atoms.Integer', 'Integer', ({(48, 55, 48, 56): '(0)'}, {}), '(0)', False, 'from mathics.core.atoms import Integer\n'), ((36, 22, 36, 61), 'mathics.builtin.base.MessageException', 'MessageException', ({(36, 39, 36, 45): '"""Part"""', (36, 47, 36, 53): '"""span"""', (36, 55, 36, 60): 'pspec'}, {}), "('Part', 'span', pspec)", False, 'from mathics.builtin.base import MessageException\n'), ((183, 18, 183, 55), 'mathics.builtin.base.MessageException', 'MessageException', ({(183, 35, 183, 41): '"""Part"""', (183, 43, 183, 49): '"""span"""', (183, 51, 183, 54): 'pos'}, {}), "('Part', 'span', pos)", False, 'from mathics.builtin.base import MessageException\n'), ((228, 26, 228, 64), 'mathics.builtin.base.MessageException', 'MessageException', ({(228, 43, 228, 49): '"""Part"""', (228, 51, 228, 58): '"""pspec"""', (228, 60, 228, 63): 'pos'}, {}), "('Part', 'pspec', pos)", False, 'from mathics.builtin.base import MessageException\n'), ((233, 22, 233, 60), 'mathics.builtin.base.MessageException', 'MessageException', ({(233, 39, 233, 45): '"""Part"""', (233, 47, 233, 54): '"""pspec"""', (233, 56, 233, 59): 'pos'}, {}), "('Part', 'pspec', pos)", False, 'from mathics.builtin.base import MessageException\n')] |
webu/pyopenproject | pyopenproject/business/services/command/configuration/find.py | 40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966 | from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.get_request import GetRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.configuration.configuration_command import ConfigurationCommand
from pyopenproject.model.configuration import Configuration
class Find(ConfigurationCommand):
def __init__(self, connection):
"""Constructor for class Find, from ConfigurationCommand.
:param connection: The connection data
"""
super().__init__(connection)
def execute(self):
try:
json_obj = GetRequest(self.connection, f"{self.CONTEXT}").execute()
return Configuration(json_obj)
except RequestError as re:
raise BusinessError("Error listing configuration") from re
| [((20, 19, 20, 42), 'pyopenproject.model.configuration.Configuration', 'Configuration', ({(20, 33, 20, 41): 'json_obj'}, {}), '(json_obj)', False, 'from pyopenproject.model.configuration import Configuration\n'), ((22, 18, 22, 62), 'pyopenproject.business.exception.business_error.BusinessError', 'BusinessError', ({(22, 32, 22, 61): '"""Error listing configuration"""'}, {}), "('Error listing configuration')", False, 'from pyopenproject.business.exception.business_error import BusinessError\n'), ((19, 23, 19, 69), 'pyopenproject.api_connection.requests.get_request.GetRequest', 'GetRequest', ({(19, 34, 19, 49): 'self.connection', (19, 51, 19, 68): 'f"""{self.CONTEXT}"""'}, {}), "(self.connection, f'{self.CONTEXT}')", False, 'from pyopenproject.api_connection.requests.get_request import GetRequest\n')] |
vrautela/treadmill | lib/python/treadmill/tests/api/cell_test.py | 05e47fa8acdf8bad7af78e737efb26ea6488de82 | """Cell API tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import mock
from treadmill import admin
from treadmill.api import cell
class ApiCellTest(unittest.TestCase):
"""treadmill.api.cell tests."""
def setUp(self):
self.cell = cell.API()
def tearDown(self):
pass
@mock.patch('treadmill.context.AdminContext.conn',
mock.Mock(return_value=admin.Admin(None, None)))
@mock.patch('treadmill.admin.Cell.list', mock.Mock(return_value=[]))
def test_list(self):
"""Dummy test for treadmill.api.cell._list()"""
self.cell.list()
cell_admin = admin.Cell(None)
self.assertTrue(cell_admin.list.called)
@mock.patch('treadmill.context.AdminContext.conn',
mock.Mock(return_value=admin.Admin(None, None)))
@mock.patch('treadmill.admin.Cell.get',
mock.Mock(return_value={'cell': 'ny-999-cell'}))
def test_get(self):
"""Dummy test for treadmill.api.cell.get()"""
cell_admin = admin.Cell(None)
self.cell.get('some-cell')
cell_admin.get.assert_called_with('some-cell')
@mock.patch('treadmill.context.AdminContext.conn',
mock.Mock(return_value=admin.Admin(None, None)))
@mock.patch('treadmill.admin.Cell.get',
mock.Mock(return_value={'cell': 'ny-999-cell'}))
@mock.patch('treadmill.admin.Cell.create', mock.Mock())
def test_create(self):
"""Dummy test for treadmill.api.cell.create()"""
cell_admin = admin.Cell(None)
self.cell.create('some-cell', {'location': 'ny',
'treadmillid': 'treadmld',
'version': 'v3'})
cell_admin.get.assert_called_with('some-cell', dirty=True)
if __name__ == '__main__':
unittest.main()
| [((60, 4, 60, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((21, 20, 21, 30), 'treadmill.api.cell.API', 'cell.API', ({}, {}), '()', False, 'from treadmill.api import cell\n'), ((32, 21, 32, 37), 'treadmill.admin.Cell', 'admin.Cell', ({(32, 32, 32, 36): 'None'}, {}), '(None)', False, 'from treadmill import admin\n'), ((28, 45, 28, 71), 'mock.Mock', 'mock.Mock', (), '', False, 'import mock\n'), ((41, 21, 41, 37), 'treadmill.admin.Cell', 'admin.Cell', ({(41, 32, 41, 36): 'None'}, {}), '(None)', False, 'from treadmill import admin\n'), ((38, 16, 38, 63), 'mock.Mock', 'mock.Mock', (), '', False, 'import mock\n'), ((52, 21, 52, 37), 'treadmill.admin.Cell', 'admin.Cell', ({(52, 32, 52, 36): 'None'}, {}), '(None)', False, 'from treadmill import admin\n'), ((48, 16, 48, 63), 'mock.Mock', 'mock.Mock', (), '', False, 'import mock\n'), ((49, 47, 49, 58), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((27, 39, 27, 62), 'treadmill.admin.Admin', 'admin.Admin', ({(27, 51, 27, 55): 'None', (27, 57, 27, 61): 'None'}, {}), '(None, None)', False, 'from treadmill import admin\n'), ((36, 39, 36, 62), 'treadmill.admin.Admin', 'admin.Admin', ({(36, 51, 36, 55): 'None', (36, 57, 36, 61): 'None'}, {}), '(None, None)', False, 'from treadmill import admin\n'), ((46, 39, 46, 62), 'treadmill.admin.Admin', 'admin.Admin', ({(46, 51, 46, 55): 'None', (46, 57, 46, 61): 'None'}, {}), '(None, None)', False, 'from treadmill import admin\n')] |
microsoft/ai-python-package | src/python_package/__init__.py | 770f5167ebc32b5410739f04c5730e68f84785c9 | # -------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in project root for information.
# -------------------------------------------------------------
"""Python Package Template"""
from __future__ import annotations
__version__ = "0.0.2"
| [] |
themoodymann/piChain | tests/test_integration_partition.py | 4de9e8da3994901371713b68bc05295fe6676571 | """Integration test: Test partition of piChain nodes.
Note: run tests with default setting values in config.py.
"""
import time
from tests.util import MultiNodeTest
class MultiNodeTestPartition(MultiNodeTest):
def test_scenario30_partition(self):
self.start_processes_with_test_scenario(30, 5)
time.sleep(8)
self.terminate_processes()
node0_blocks = self.extract_committed_blocks_single_process(0)
node1_blocks = self.extract_committed_blocks_single_process(1)
node2_blocks = self.extract_committed_blocks_single_process(2)
node3_blocks = self.extract_committed_blocks_single_process(3)
node4_blocks = self.extract_committed_blocks_single_process(4)
assert len(node0_blocks) > 0
assert node0_blocks == node1_blocks
assert node2_blocks == node1_blocks
assert node3_blocks == node1_blocks
assert node4_blocks == node1_blocks
def test_scenario31_partition(self):
self.start_processes_with_test_scenario(31, 5)
time.sleep(8)
self.terminate_processes()
node0_blocks = self.extract_committed_blocks_single_process(0)
node1_blocks = self.extract_committed_blocks_single_process(1)
node2_blocks = self.extract_committed_blocks_single_process(2)
node3_blocks = self.extract_committed_blocks_single_process(3)
node4_blocks = self.extract_committed_blocks_single_process(4)
assert len(node0_blocks) > 0
assert node0_blocks == node1_blocks
assert node2_blocks == node1_blocks
assert node3_blocks == node1_blocks
assert node4_blocks == node1_blocks
def test_scenario32_partition(self):
self.start_processes_with_test_scenario(32, 5)
time.sleep(15)
self.terminate_processes()
node0_blocks = self.extract_committed_blocks_single_process(0)
node1_blocks = self.extract_committed_blocks_single_process(1)
node2_blocks = self.extract_committed_blocks_single_process(2)
node3_blocks = self.extract_committed_blocks_single_process(3)
node4_blocks = self.extract_committed_blocks_single_process(4)
assert len(node0_blocks) > 0
assert node0_blocks == node1_blocks
assert node2_blocks == node1_blocks
assert node3_blocks == node1_blocks
assert node4_blocks == node1_blocks
| [((14, 8, 14, 21), 'time.sleep', 'time.sleep', ({(14, 19, 14, 20): '(8)'}, {}), '(8)', False, 'import time\n'), ((31, 8, 31, 21), 'time.sleep', 'time.sleep', ({(31, 19, 31, 20): '(8)'}, {}), '(8)', False, 'import time\n'), ((48, 8, 48, 22), 'time.sleep', 'time.sleep', ({(48, 19, 48, 21): '(15)'}, {}), '(15)', False, 'import time\n')] |
projectpai/paipass | sandbox/pdp2/arbitrary_data/zip_files.py | 8b8e70b6808bf026cf957e240c7eed7bfcf4c55d | import zipfile
import random
RAND_INT_RANGE = (1,100)
def wrf(fname):
with open(fname, 'w') as f:
for i in range(100):
f.write(str(random.randint(*RAND_INT_RANGE)))
fnames = []
for i in range(10):
fname = 'file' + str(i) + '.txt'
wrf(fname)
fnames.append(fname)
dirpaths = set()
with zipfile.ZipFile('myzip.zip', 'w', compression=zipfile.ZIP_DEFLATED) as zf:
for fname in fnames:
dirpath = '/dirpath'+str(random.randint(*RAND_INT_RANGE))
# let's not have duplicate dirpaths.
while dirpath in dirpaths:
dirpath = '/dirpath' + str(random.randint(*RAND_INT_RANGE))
zf.write(fname, arcname=dirpath+'/'+fname)
dirpaths.add(dirpath)
print('dirpaths', dirpaths)
print('fnames', fnames)
| [((17, 5, 17, 72), 'zipfile.ZipFile', 'zipfile.ZipFile', (), '', False, 'import zipfile\n'), ((20, 33, 20, 64), 'random.randint', 'random.randint', ({(20, 48, 20, 63): '*RAND_INT_RANGE'}, {}), '(*RAND_INT_RANGE)', False, 'import random\n'), ((8, 24, 8, 55), 'random.randint', 'random.randint', ({(8, 39, 8, 54): '*RAND_INT_RANGE'}, {}), '(*RAND_INT_RANGE)', False, 'import random\n'), ((23, 39, 23, 70), 'random.randint', 'random.randint', ({(23, 54, 23, 69): '*RAND_INT_RANGE'}, {}), '(*RAND_INT_RANGE)', False, 'import random\n')] |
mwesterhof/wagtail_managed404 | tests/testproject/testproject/tests/test_middleware.py | a961271c7fc70accb43ec329da9defe36e3dab3c | import unittest
from django.test import Client
from wagtail.core.models import Page
from wagtail_managed404.models import PageNotFoundEntry
class TestMiddleware(unittest.TestCase):
"""Tests for `wagtail_app_pages` package."""
def setUp(self):
self.client = Client()
self.invalid_url = '/definitely_not_an_actual_url/'
self.redirect_to_url = '/much_better_url/'
self.redirect_to_page = Page.objects.get(depth=2)
def test_redirect_to_url(self):
PageNotFoundEntry.objects.all().delete()
entry = self._trigger_404()
entry.redirect_to_url = self.redirect_to_url
entry.save()
self._validate_redirect(self.invalid_url, self.redirect_to_url)
def test_redirect_to_page(self):
PageNotFoundEntry.objects.all().delete()
entry = self._trigger_404()
entry.redirect_to_page = self.redirect_to_page
entry.save()
self._validate_redirect(self.invalid_url, self.redirect_to_page.url)
def _trigger_404(self):
response = self.client.get(self.invalid_url)
self.assertEquals(response.status_code, 404)
entries = PageNotFoundEntry.objects.filter(url=self.invalid_url)
self.assertEquals(entries.count(), 1)
return entries.first()
def _validate_redirect(self, source_url, target_url):
response = self.client.get(source_url)
self.assertEquals(response.status_code, 302)
self.assertEquals(response.url, target_url)
| [((13, 22, 13, 30), 'django.test.Client', 'Client', ({}, {}), '()', False, 'from django.test import Client\n'), ((16, 32, 16, 57), 'wagtail.core.models.Page.objects.get', 'Page.objects.get', (), '', False, 'from wagtail.core.models import Page\n'), ((36, 18, 36, 72), 'wagtail_managed404.models.PageNotFoundEntry.objects.filter', 'PageNotFoundEntry.objects.filter', (), '', False, 'from wagtail_managed404.models import PageNotFoundEntry\n'), ((19, 8, 19, 39), 'wagtail_managed404.models.PageNotFoundEntry.objects.all', 'PageNotFoundEntry.objects.all', ({}, {}), '()', False, 'from wagtail_managed404.models import PageNotFoundEntry\n'), ((26, 8, 26, 39), 'wagtail_managed404.models.PageNotFoundEntry.objects.all', 'PageNotFoundEntry.objects.all', ({}, {}), '()', False, 'from wagtail_managed404.models import PageNotFoundEntry\n')] |
maraujop/django-reversion | src/reversion/version.py | c9e7788ca858d3c75b617a7277ffcd177a19d414 | __version__ = (1, 8, 5)
| [] |
hajime9652/observations | observations/r/bomsoi.py | 2c8b1ac31025938cb17762e540f2f592e302d5de | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def bomsoi(path):
"""Southern Oscillation Index Data
The Southern Oscillation Index (SOI) is the difference in barometric
pressure at sea level between Tahiti and Darwin. Annual SOI and
Australian rainfall data, for the years 1900-2001, are given.
Australia's annual mean rainfall is an area-weighted average of the
total annual precipitation at approximately 370 rainfall stations around
the country.
This data frame contains the following columns:
Year
a numeric vector
Jan
average January SOI values for each year
Feb
average February SOI values for each year
Mar
average March SOI values for each year
Apr
average April SOI values for each year
May
average May SOI values for each year
Jun
average June SOI values for each year
Jul
average July SOI values for each year
Aug
average August SOI values for each year
Sep
average September SOI values for each year
Oct
average October SOI values for each year
Nov
average November SOI values for each year
Dec
average December SOI values for each year
SOI
a numeric vector consisting of average annual SOI values
avrain
a numeric vector consisting of a weighted average annual rainfall at
a large number of Australian sites
NTrain
Northern Territory rain
northRain
north rain
seRain
southeast rain
eastRain
east rain
southRain
south rain
swRain
southwest rain
Australian Bureau of Meteorology web pages:
http://www.bom.gov.au/climate/change/rain02.txt and
http://www.bom.gov.au/climate/current/soihtm1.shtml
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `bomsoi.csv`.
Returns:
Tuple of np.ndarray `x_train` with 106 rows and 21 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'bomsoi.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/bomsoi.csv'
maybe_download_and_extract(path, url,
save_file_name='bomsoi.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
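# A minimal usage sketch added for illustration only (not part of the original
# module); the data directory below is an arbitrary assumption.
if __name__ == '__main__':
  x_train, metadata = bomsoi('~/observations_data')
  # per the docstring above, x_train has 106 rows and 21 columns
  print(x_train.shape)
  print(metadata['columns'])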
| [((108, 9, 108, 33), 'os.path.expanduser', 'os.path.expanduser', ({(108, 28, 108, 32): 'path'}, {}), '(path)', False, 'import os\n'), ((112, 4, 114, 44), 'observations.util.maybe_download_and_extract', 'maybe_download_and_extract', (), '', False, 'from observations.util import maybe_download_and_extract\n'), ((116, 21, 116, 49), 'os.path.join', 'os.path.join', ({(116, 34, 116, 38): 'path', (116, 40, 116, 48): 'filename'}, {}), '(path, filename)', False, 'import os\n'), ((110, 24, 110, 52), 'os.path.join', 'os.path.join', ({(110, 37, 110, 41): 'path', (110, 43, 110, 51): 'filename'}, {}), '(path, filename)', False, 'import os\n')] |
dangerstudios/OpenPype | openpype/hosts/houdini/plugins/publish/validate_bypass.py | 10ddcc4699137888616eec57cd7fac9648189714 | import pyblish.api
import openpype.api
class ValidateBypassed(pyblish.api.InstancePlugin):
"""Validate all primitives build hierarchy from attribute when enabled.
The name of the attribute must exist on the prims and have the same name
as Build Hierarchy from Attribute's `Path Attribute` value on the Alembic
ROP node whenever Build Hierarchy from Attribute is enabled.
"""
order = openpype.api.ValidateContentsOrder - 0.1
families = ["*"]
hosts = ["houdini"]
label = "Validate ROP Bypass"
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
rop = invalid[0]
raise RuntimeError(
"ROP node %s is set to bypass, publishing cannot continue.." %
rop.path()
)
@classmethod
def get_invalid(cls, instance):
rop = instance[0]
if rop.isBypassed():
return [rop]
| [] |
GavinK-ai/cv | gavPrj/dataset_core.py | 6dd11b2100c40aca281508c3821c807ef0ee227d | import os
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
#srcPaths = ('dataset/Screenshot1','dataset/Screenshot2','dataset/Screenshot3', 'dataset/Screenshot4')
#srcPaths = ('all_dataset/s1',
# 'all_dataset/s10',
# 'all_dataset/s11',
# 'all_dataset/s12',
# 'all_dataset/s13',
# 'all_dataset/s14',
# 'all_dataset/s15',
# 'all_dataset/s16',
# 'all_dataset/s17',
# 'all_dataset/s18',
# 'all_dataset/s19',
# 'all_dataset/s2',
# 'all_dataset/s20',
# 'all_dataset/s21',
# 'all_dataset/s22',
# 'all_dataset/s23',
# 'all_dataset/s24',
# 'all_dataset/s25',
# 'all_dataset/s26',
# 'all_dataset/s27',
# 'all_dataset/s28',
# 'all_dataset/s29',
# 'all_dataset/s3',
# 'all_dataset/s30',
# 'all_dataset/s31',
# 'all_dataset/s32',
# 'all_dataset/s33',
# 'all_dataset/s34',
# 'all_dataset/s35',
# 'all_dataset/s36',
# 'all_dataset/s37',
# 'all_dataset/s38',
# 'all_dataset/s39',
# 'all_dataset/s4',
# 'all_dataset/s40',
# 'all_dataset/s41',
# 'all_dataset/s42',
# 'all_dataset/s43',
# 'all_dataset/s44',
# 'all_dataset/s45',
# 'all_dataset/s46',
# 'all_dataset/s47',
# 'all_dataset/s48',
# 'all_dataset/s49',
# 'all_dataset/s5',
# 'all_dataset/s50',
# 'all_dataset/s51',
# 'all_dataset/s52',
# 'all_dataset/s53',
# 'all_dataset/s54',
# 'all_dataset/s55',
# 'all_dataset/s56',
# 'all_dataset/s57',
# 'all_dataset/s58',
# 'all_dataset/s59',
# 'all_dataset/s6',
# 'all_dataset/s60',
# 'all_dataset/s61',
# 'all_dataset/s62',
# 'all_dataset/s63',
# 'all_dataset/s7',
# 'all_dataset/s8',
# 'all_dataset/s9')
srcPaths = ('testdataset/t1','testdataset/t2')
datasetfilename = 'testdataset1.npz'
def create_dataset(datasetfilename, srcPaths, classNames):
imgList = []
labelList = []
labelNameList = []
for srcPath in srcPaths:
# append all files in srcPath dir into imgList and labelList
for fname in os.listdir(srcPath):
filePath = os.path.join(srcPath, fname)
img = cv.imread(filePath)
            # split the file name (without extension) to use as the label
fname_no_ext = os.path.splitext(fname)[0]
# label = fname_no_ext[-1]
label = fname_no_ext
imgList.append(img)
labelList.append(classNames[label])
labelNameList.append(label)
    # convert imgList and labelList to numpy arrays
images = np.array(imgList, dtype='object')
labels = np.array(labelList, dtype='object')
labelnames = np.array(labelNameList)
# save converted images and labels into compressed numpy zip file
np.savez_compressed(datasetfilename, images=images, labels=labels, labelnames=labelnames)
return True
def displayImg():
# for fname in os.listdir(srcPath):
pass
if __name__ == '__main__':
# save a dataset in numpy compressed format
# datasetfilename = 'tiredataset.npz'
classNames = {'afiq':0, 'azureen':1, 'gavin':2, 'goke':3, 'inamul':4, 'jincheng':5, 'mahmuda':6, 'numan':7, 'saseendran':8}
if create_dataset(datasetfilename, srcPaths, classNames):
data = np.load(datasetfilename, allow_pickle=True)
imgList = data['images']
labelList = data['labels']
labelNameList = data['labelnames']
img = imgList[0]
label = labelList[0]
labelNameList = data['labelnames']
imgRGB = img[:, :, ::-1]
plt.imshow(imgRGB)
plt.title(label)
plt.show()
print(imgList.shape)
print(labelList.shape)
# imgList, labelList = create_dataset()
# img = imgList[0]
# label = labelList[0]
# imgRGB = img[:, :, ::-1]
# plt.imshow(imgRGB)
# plt.title(label)
# plt.show()
# img = imgList[1]
# label = labelList[1]
# imgRGB = img[:, :, ::-1]
# plt.imshow(imgRGB)
# plt.title(label)
# plt.show()
# img = imgList[3]
# label = labelList[3]
# imgRGB = img[:, :, ::-1]
# plt.imshow(imgRGB)
# plt.title(label)
# plt.show()
| [((108, 4, 108, 93), 'numpy.savez_compressed', 'np.savez_compressed', (), '', True, 'import numpy as np\n'), ((85, 21, 85, 40), 'os.listdir', 'os.listdir', ({(85, 32, 85, 39): 'srcPath'}, {}), '(srcPath)', False, 'import os\n'), ((103, 17, 103, 50), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((104, 17, 104, 52), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((105, 21, 105, 44), 'numpy.array', 'np.array', ({(105, 30, 105, 43): 'labelNameList'}, {}), '(labelNameList)', True, 'import numpy as np\n'), ((128, 15, 128, 58), 'numpy.load', 'np.load', (), '', True, 'import numpy as np\n'), ((139, 8, 139, 26), 'matplotlib.pyplot.imshow', 'plt.imshow', ({(139, 19, 139, 25): 'imgRGB'}, {}), '(imgRGB)', True, 'import matplotlib.pyplot as plt\n'), ((140, 8, 140, 24), 'matplotlib.pyplot.title', 'plt.title', ({(140, 18, 140, 23): 'label'}, {}), '(label)', True, 'import matplotlib.pyplot as plt\n'), ((142, 8, 142, 18), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((87, 23, 87, 51), 'os.path.join', 'os.path.join', ({(87, 36, 87, 43): 'srcPath', (87, 45, 87, 50): 'fname'}, {}), '(srcPath, fname)', False, 'import os\n'), ((89, 18, 89, 37), 'cv2.imread', 'cv.imread', ({(89, 28, 89, 36): 'filePath'}, {}), '(filePath)', True, 'import cv2 as cv\n'), ((93, 27, 93, 50), 'os.path.splitext', 'os.path.splitext', ({(93, 44, 93, 49): 'fname'}, {}), '(fname)', False, 'import os\n')] |
jinified/kronos | kronos/kronos.py | 1f110372a025d28ccc407372320491ee818c893d | """
Kronos: A simple scheduler for graduate training programme
Entities: User, Schedule, Rotation
"""
from operator import itemgetter
from datetime import datetime, timedelta
def getRotationCapacity(rotationId, startDate, endDate, assignments):
""" Calculate number of users assigned to a particular rotation during the specified duration
"""
start = datetime.strptime(startDate, "%d%m%Y")
end = datetime.strptime(endDate, "%d%m%Y")
duration = int((end - start).days / 7.0)
# Weeks involved during the rotation
weeks = [(start + timedelta(weeks=x)).strftime("%W%Y") for x in range(0, duration)]
capacity = sum(itemgetter(*weeks)(assignments[rotationId][0][0]))
return capacity
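def _example_rotation_capacity():
    """ Editor-added sketch (not part of the original scheduler): shows the
    shape getRotationCapacity expects, where assignments[rotationId][0][0]
    maps "%W%Y" week keys to head counts. All values here are made up.
    """
    example_assignments = {"PMO": [[{"012018": 2, "022018": 3}], []]}
    # Two weeks starting 1 Jan 2018 cover week keys "012018" and "022018",
    # so the returned capacity is 2 + 3 = 5.
    return getRotationCapacity("PMO", "01012018", "15012018", example_assignments)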
def score_assignment(
assignments,
solution,
earliestAvailableDate,
core_rotations=["PMO", "PE", "SE", "PM"],
rotation_duration={
"PMO": 12,
"PE": 12,
"SE": 12,
"PM": 12,
"SYS": 12,
"ARC": 12,
"ANA": 12,
},
):
""" Calculate loss function for suggested solution (negative = better)
Parameters:
assignments (dict): global assignment object by rotation
        solution (list): ordered (rotationId, position) pairs for a user
earliestAvailableDate (date): earliest date where a user can be assigned a rotation
core_rotations (list): rotation that should be completed first
rotation_duration (dict): duration of each rotation
"""
print(solution)
# SOFT CONSTRAINT 1 - Core rotations should be completed in the first 4 rotations if possible
core_first_loss = sum(
[
-3 if x[0] in core_rotations else 0
for x in solution
if int(x[1]) <= len(core_rotations)
]
)
# SOFT CONSTRAINT 2 - External Assignment must be assigned last
external_assignment_loss = (
99 if "EXT" in [x[0] for x in solution] and solution[-1][0] != "EXT" else 0
)
# Calculate timing of each rotation from solution
solution = [
(
x[0],
rotation_duration[x[0]]
+ (sum([rotation_duration[x[0]] for x in solution[:i]]) if i != 0 else 0),
)
for i, x in enumerate(solution)
]
startDate = earliestAvailableDate
schedule = []
for x in solution:
endDate = startDate + timedelta(weeks=x[1]) - timedelta(days=1)
# Make sure the date falls on weekday
if endDate.weekday() >= 5:
endDate -= timedelta(endDate.weekday() - 4)
schedule.append(
(x[0], startDate.strftime("%d%m%Y"), endDate.strftime("%d%m%Y"))
)
startDate += timedelta(weeks=x[1])
spread_first_loss = sum(
[getRotationCapacity(x[0], x[1], x[2], assignments) for x in schedule]
)
loss = core_first_loss + external_assignment_loss + spread_first_loss
return loss
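def _example_score_assignment():
    """ Editor-added sketch (not part of the original module): builds a dummy
    assignments table with zero head count for every week so score_assignment
    can be called end to end. The rotation order and dates are made up.
    """
    week_keys = {"%02d%d" % (w, y): 0 for y in (2018, 2019) for w in range(0, 54)}
    assignments = {r: [[dict(week_keys)], []]
                   for r in ["PMO", "PE", "SE", "PM", "SYS", "ARC", "ANA"]}
    solution = [("PMO", "1"), ("PE", "2")]
    # Both rotations are core and fall in the first four slots, and the dummy
    # table is empty, so the loss is just the core bonus: -3 * 2 = -6.
    return score_assignment(assignments, solution, datetime(2018, 1, 1))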
def schedule2assignments(schedule):
""" Convert schedule object to assignment object
"""
rotations = {}
for userId, userSchedule in schedule.items():
for rotation in userSchedule:
id = rotation["rotationId"]
if id not in rotations:
rotations[id] = [[{}], []]
print(rotations[id][0][0])
startDate, endDate = itemgetter("startDate", "endDate")(rotation)
start = datetime.strptime(startDate, "%d%m%Y")
end = datetime.strptime(endDate, "%d%m%Y")
duration = int((end - start).days / 7.0)
for i in range(duration):
date = (start + timedelta(weeks=i)).strftime("%W%Y")
if date not in rotations[id][0][0]:
rotations[id][0][0][date] = 0
rotations[id][0][0][date] += 1
rotations[id][1].append((userId, startDate, endDate))
sortedDate = sorted(list(rotations[id][0][0].keys()))
if len(rotations[id][0]) < 2:
rotations[id][0].append(sortedDate[0])
rotations[id][0].append(sortedDate[-1])
elif sortedDate[0] < rotations[id][0][1]:
rotations[id][0][1] = sortedDate[0]
elif len(rotations[id][0]) > 2 and sortedDate[-1] > rotations[id][0][2]:
rotations[id][0][2] = sortedDate[-1]
print(rotations)
return rotations
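def _example_schedule_conversion():
    """ Editor-added sketch (not part of the original module): the schedule
    object maps a user id to a list of rotation dicts; the ids and dates below
    are made up purely for illustration.
    """
    schedule = {"u1": [{"rotationId": "PMO",
                        "startDate": "01012018",
                        "endDate": "23032018"}]}
    # Produces {"PMO": [[{week_key: count, ...}, first_week, last_week],
    #                   [("u1", "01012018", "23032018")]]}
    return schedule2assignments(schedule)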
def assignments2schedule(assignments):
""" Convert assignment object to overall schedule
"""
users = {}
for rotationId, rotationInfo in assignments.items():
for userId, userAssignment in rotationInfo[1].items():
if userId not in users:
users[userId] = []
users[userId].append(
{
"rotationId": rotationId,
"startDate": userAssignment[0],
"endDate": userAssignment[1],
}
)
print(users)
return users
def generateUserSchedule(user, assignments, scoring_function):
""" Generate most optimal user schedule
Parameters:
user (object): User
assignments (dict): Time-bounded assignments
scoring_function (function): scoring function to rank possible assignments
Returns:
schedule (list): list of rotations
"""
return [{"rotationId": "PMO", "startDate": "012018"}]
def getOverallSchedule(users):
""" Generate overall schedule from individual user's schedule
Parameters:
users (list): list of Users
Returns:
schedule (dict): overall assignments
"""
return {}
def getConflictingAssignments(schedule):
""" Get list of assignments which exceeded rotation capacity
Parameters:
schedule (dict): overall assignments
Returns:
        conflictingAssignmentsByRotation (dict): overall schedule with conflicting assignments
"""
return {}
if __name__ == "__main__":
pass
| [((14, 12, 14, 50), 'datetime.datetime.strptime', 'datetime.strptime', ({(14, 30, 14, 39): 'startDate', (14, 41, 14, 49): '"""%d%m%Y"""'}, {}), "(startDate, '%d%m%Y')", False, 'from datetime import datetime, timedelta\n'), ((15, 10, 15, 46), 'datetime.datetime.strptime', 'datetime.strptime', ({(15, 28, 15, 35): 'endDate', (15, 37, 15, 45): '"""%d%m%Y"""'}, {}), "(endDate, '%d%m%Y')", False, 'from datetime import datetime, timedelta\n'), ((79, 21, 79, 42), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((19, 19, 19, 37), 'operator.itemgetter', 'itemgetter', ({(19, 30, 19, 36): '*weeks'}, {}), '(*weeks)', False, 'from operator import itemgetter\n'), ((72, 54, 72, 71), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((99, 20, 99, 58), 'datetime.datetime.strptime', 'datetime.strptime', ({(99, 38, 99, 47): 'startDate', (99, 49, 99, 57): '"""%d%m%Y"""'}, {}), "(startDate, '%d%m%Y')", False, 'from datetime import datetime, timedelta\n'), ((100, 18, 100, 54), 'datetime.datetime.strptime', 'datetime.strptime', ({(100, 36, 100, 43): 'endDate', (100, 45, 100, 53): '"""%d%m%Y"""'}, {}), "(endDate, '%d%m%Y')", False, 'from datetime import datetime, timedelta\n'), ((72, 30, 72, 51), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((98, 33, 98, 67), 'operator.itemgetter', 'itemgetter', ({(98, 44, 98, 55): '"""startDate"""', (98, 57, 98, 66): '"""endDate"""'}, {}), "('startDate', 'endDate')", False, 'from operator import itemgetter\n'), ((18, 22, 18, 40), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((103, 32, 103, 50), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n')] |
jestinmwilson/personal-website | personal_env/lib/python3.8/site-packages/pylint/lint/utils.py | 6e47a7f33ed3b1ca5c1d42c89c5380d22992ed74 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import contextlib
import sys
from pylint.utils import utils
class ArgumentPreprocessingError(Exception):
"""Raised if an error occurs during argument preprocessing."""
def preprocess_options(args, search_for):
"""look for some options (keys of <search_for>) which have to be processed
before others
values of <search_for> are callback functions to call when the option is
found
"""
i = 0
while i < len(args):
arg = args[i]
if arg.startswith("--"):
try:
option, val = arg[2:].split("=", 1)
except ValueError:
option, val = arg[2:], None
try:
cb, takearg = search_for[option]
except KeyError:
i += 1
else:
del args[i]
if takearg and val is None:
if i >= len(args) or args[i].startswith("-"):
msg = "Option %s expects a value" % option
raise ArgumentPreprocessingError(msg)
val = args[i]
del args[i]
elif not takearg and val is not None:
msg = "Option %s doesn't expects a value" % option
raise ArgumentPreprocessingError(msg)
cb(option, val)
else:
i += 1
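def _example_preprocess_options():
    """Editor-added usage sketch (not part of pylint): `search_for` maps option
    names to (callback, takes_argument) pairs; the option names are invented.
    """
    seen = {}
    def record(option, value):
        seen[option] = value
    args = ["--fruit=apple", "--verbose", "positional"]
    preprocess_options(args, {"fruit": (record, True), "verbose": (record, False)})
    # args is mutated in place -> ["positional"]; seen == {"fruit": "apple", "verbose": None}
    return args, seen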
def _patch_sys_path(args):
original = list(sys.path)
changes = []
seen = set()
for arg in args:
path = utils.get_python_path(arg)
if path not in seen:
changes.append(path)
seen.add(path)
sys.path[:] = changes + sys.path
return original
@contextlib.contextmanager
def fix_import_path(args):
"""Prepare sys.path for running the linter checks.
Within this context, each of the given arguments is importable.
Paths are added to sys.path in corresponding order to the arguments.
We avoid adding duplicate directories to sys.path.
`sys.path` is reset to its original value upon exiting this context.
"""
original = _patch_sys_path(args)
try:
yield
finally:
sys.path[:] = original
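def _example_fix_import_path(paths):
    """Editor-added usage sketch (not part of pylint): entries for the given
    file or directory paths are importable only inside the `with` block and
    sys.path is restored on exit.
    """
    with fix_import_path(paths):
        patched = list(sys.path)
    return patched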
| [((54, 15, 54, 41), 'pylint.utils.utils.get_python_path', 'utils.get_python_path', ({(54, 37, 54, 40): 'arg'}, {}), '(arg)', False, 'from pylint.utils import utils\n')] |
deepneuralmachine/google-research | mol_dqn/experimental/multi_obj.py | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Generates molecules that satisfy two targets.
Target1: SAS
Target2: QED
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
from absl import app
from absl import flags
from rdkit import Chem
from rdkit.Chem import QED
from rdkit.Contrib import SA_Score
from tensorflow.compat.v1 import gfile
from mol_dqn.chemgraph.mcts import deep_q_networks
from mol_dqn.chemgraph.mcts import molecules as molecules_mdp
from mol_dqn.chemgraph.mcts import run_dqn
from mol_dqn.chemgraph.tensorflow import core
flags.DEFINE_float('target_sas', 1, 'The target SAS of the molecule.')
flags.DEFINE_float('target_qed', 0.5, 'The target QED of the molecule.')
flags.DEFINE_float('gamma', 0.999, 'discount')
FLAGS = flags.FLAGS
class MultiObjectiveRewardMolecule(molecules_mdp.Molecule):
"""Defines the subclass of generating a molecule with a specific reward.
  The reward is defined as a 1-D vector with 2 entries: the negative absolute
  deviation of the molecule's SAS from the target SAS, and of its QED from the
  target QED, i.e. reward = (-|sas - target_sas|, -|qed - target_qed|)
"""
def _reward(self):
"""Calculates the reward of the current state.
The reward is defined as a tuple of the similarity and QED value.
Returns:
A tuple of the similarity and qed value
"""
# calculate similarity.
# if the current molecule does not contain the scaffold of the target,
# similarity is zero.
if self._state is None:
return 0.0, 0.0
mol = Chem.MolFromSmiles(self._state)
if mol is None:
return 0.0, 0.0
qed_value = QED.qed(mol)
sas = SA_Score.sascorer.calculateScore(mol)
return -abs(sas - FLAGS.target_sas), -abs(qed_value - FLAGS.target_qed)
def soft_cst(v, l, r):
if l <= v <= r:
return 1
return -min(abs(l - v), abs(r - v))
class Molecule(molecules_mdp.Molecule):
"""SAS and QED reward molecule."""
def _reward(self):
"""Calculates the reward of the current state.
The reward is defined as a tuple of the similarity and QED value.
Returns:
A tuple of the similarity and qed value
"""
# calculate similarity.
# if the current molecule does not contain the scaffold of the target,
# similarity is zero.
if self._state is None:
return 0.0, 0.0
mol = Chem.MolFromSmiles(self._state)
if mol is None:
return 0.0, 0.0
qed_value = QED.qed(mol)
sas = SA_Score.sascorer.calculateScore(mol)
# c1 = soft_cst(sas, FLAGS.target_sas - 0.2, FLAGS.target_sas + 0.2)
# c2 = soft_cst(qed_value, FLAGS.target_qed - 0.1, FLAGS.target_qed + 0.1)
# # if c1 < 0 and c2 < 0:
# # return - c1 * c2
# # else:
# # return c1 * c2
return (soft_cst(sas, FLAGS.target_sas - 0.2, FLAGS.target_sas + 0.2) +
soft_cst(qed_value, FLAGS.target_qed - 0.1,
FLAGS.target_qed + 0.1)) * FLAGS.gamma**(
self.max_steps - self._counter)
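# Editor-added worked example (made-up numbers): with the default target_sas=1
# and target_qed=0.5, a molecule with sas=3.0 and qed=0.4 scores
# soft_cst(3.0, 0.8, 1.2) = -1.8 and soft_cst(0.4, 0.4, 0.6) = 1, so the raw
# reward is -0.8 before the gamma**(max_steps - counter) discount is applied.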
def main(argv):
del argv
if FLAGS.hparams is not None:
with gfile.Open(FLAGS.hparams, 'r') as f:
hparams = deep_q_networks.get_hparams(**json.load(f))
else:
hparams = deep_q_networks.get_hparams()
hparams.add_hparam('target_qed', FLAGS.target_qed)
hparams.add_hparam('target_sas', FLAGS.target_sas)
environment = Molecule(
atom_types=set(hparams.atom_types),
init_mol='CCc1c(C)[nH]c2CCC(CN3CCOCC3)C(=O)c12',
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
allow_bonds_between_rings=False,
allowed_ring_sizes={3, 4, 5, 6},
max_steps=hparams.max_steps_per_episode)
dqn = deep_q_networks.DeepQNetwork(
input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
run_dqn.run_training(
hparams=hparams,
environment=environment,
dqn=dqn,
)
core.write_hparams(hparams, os.path.join(FLAGS.model_dir, 'config.json'))
if __name__ == '__main__':
app.run(main)
| [((42, 0, 42, 70), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', ({(42, 19, 42, 31): '"""target_sas"""', (42, 33, 42, 34): '(1)', (42, 36, 42, 69): '"""The target SAS of the molecule."""'}, {}), "('target_sas', 1, 'The target SAS of the molecule.')", False, 'from absl import flags\n'), ((43, 0, 43, 72), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', ({(43, 19, 43, 31): '"""target_qed"""', (43, 33, 43, 36): '(0.5)', (43, 38, 43, 71): '"""The target QED of the molecule."""'}, {}), "('target_qed', 0.5, 'The target QED of the molecule.')", False, 'from absl import flags\n'), ((44, 0, 44, 46), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', ({(44, 19, 44, 26): '"""gamma"""', (44, 28, 44, 33): '(0.999)', (44, 35, 44, 45): '"""discount"""'}, {}), "('gamma', 0.999, 'discount')", False, 'from absl import flags\n'), ((148, 2, 152, 3), 'mol_dqn.chemgraph.mcts.run_dqn.run_training', 'run_dqn.run_training', (), '', False, 'from mol_dqn.chemgraph.mcts import run_dqn\n'), ((158, 2, 158, 15), 'absl.app.run', 'app.run', ({(158, 10, 158, 14): 'main'}, {}), '(main)', False, 'from absl import app\n'), ((68, 10, 68, 41), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', ({(68, 29, 68, 40): 'self._state'}, {}), '(self._state)', False, 'from rdkit import Chem\n'), ((72, 16, 72, 28), 'rdkit.Chem.QED.qed', 'QED.qed', ({(72, 24, 72, 27): 'mol'}, {}), '(mol)', False, 'from rdkit.Chem import QED\n'), ((73, 10, 73, 47), 'rdkit.Contrib.SA_Score.sascorer.calculateScore', 'SA_Score.sascorer.calculateScore', ({(73, 43, 73, 46): 'mol'}, {}), '(mol)', False, 'from rdkit.Contrib import SA_Score\n'), ((99, 10, 99, 41), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', ({(99, 29, 99, 40): 'self._state'}, {}), '(self._state)', False, 'from rdkit import Chem\n'), ((103, 16, 103, 28), 'rdkit.Chem.QED.qed', 'QED.qed', ({(103, 24, 103, 27): 'mol'}, {}), '(mol)', False, 'from rdkit.Chem import QED\n'), ((104, 10, 104, 47), 'rdkit.Contrib.SA_Score.sascorer.calculateScore', 'SA_Score.sascorer.calculateScore', ({(104, 43, 104, 46): 'mol'}, {}), '(mol)', False, 'from rdkit.Contrib import SA_Score\n'), ((124, 14, 124, 43), 'mol_dqn.chemgraph.mcts.deep_q_networks.get_hparams', 'deep_q_networks.get_hparams', ({}, {}), '()', False, 'from mol_dqn.chemgraph.mcts import deep_q_networks\n'), ((154, 30, 154, 74), 'os.path.join', 'os.path.join', ({(154, 43, 154, 58): 'FLAGS.model_dir', (154, 60, 154, 73): '"""config.json"""'}, {}), "(FLAGS.model_dir, 'config.json')", False, 'import os\n'), ((121, 9, 121, 39), 'tensorflow.compat.v1.gfile.Open', 'gfile.Open', ({(121, 20, 121, 33): 'FLAGS.hparams', (121, 35, 121, 38): '"""r"""'}, {}), "(FLAGS.hparams, 'r')", False, 'from tensorflow.compat.v1 import gfile\n'), ((140, 11, 141, 61), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((122, 46, 122, 58), 'json.load', 'json.load', ({(122, 56, 122, 57): 'f'}, {}), '(f)', False, 'import json\n')] |
uw-it-aca/myuw | myuw/test/views/test_rest_search.py | 3fa1fabeb3c09d81a049f7c1a8c94092d612438a | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
from django.test.utils import override_settings
from django.urls import reverse
from myuw.test.api import MyuwApiTest
@override_settings(
RESTCLIENTS_ADMIN_AUTH_MODULE='rc_django.tests.can_proxy_restclient')
class RestSearchViewTest(MyuwApiTest):
def test_post(self):
self.set_user('javerage')
# hfs
url = reverse("myuw_rest_search", args=["hfs", "accounts"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url, "/restclients/view/hfs/myuw/v1/javerage")
# bookstore
url = reverse("myuw_rest_search", args=["book", "index"])
response = self.client.post(url, {
"sln1": "123", "quarter": "spring", "returnlink": "t"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/book/uw/json_utf8_202007.ubs%3F"
"quarter=spring&sln1=123&returnlink=t"))
# myplan
url = reverse("myuw_rest_search", args=["myplan", "index"])
response = self.client.post(url, {
"uwregid": "ABC", "year": "2013", "quarter": "spring"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/myplan/student/api/plan/v1/2013,spring,1,ABC")
# libraries
url = reverse("myuw_rest_search", args=["libraries", "accounts"])
response = self.client.post(url, {"id": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/libraries/mylibinfo/v1/?id=javerage")
# iasystem
url = reverse("myuw_rest_search", args=[
"iasystem_uw", "uw/api/v1/evaluation"])
response = self.client.post(url, {"student_id": "123456"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/iasystem_uw/api/" +
"v1/evaluation?student_id=123456"))
# uwnetid
url = reverse("myuw_rest_search", args=["uwnetid", "password"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/uwnetid/nws/v1/uwnetid/javerage/password")
url = reverse("myuw_rest_search", args=["uwnetid", "subscription"])
response = self.client.post(url, {"uwnetid": "javerage"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/uwnetid/nws/v1/uwnetid/" +
"javerage/subscription/60,64,105")
# grad
url = reverse("myuw_rest_search", args=[
"grad", "services/students/v1/api/committee"])
response = self.client.post(url, {
"id": "12345", "csrfmiddlewaretoken": "0000000"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/grad/services/" +
"students/v1/api/committee?id=12345"))
# notices
url = reverse("myuw_rest_search", args=["sws", "notices"])
response = self.client.post(url, {
"uwregid": "12345678123456781234567812345678",
"csrfmiddlewaretoken": "0000000"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, (
"/restclients/view/sws/student/v5/notice/" +
"12345678123456781234567812345678.json"))
# upass
url = reverse("myuw_rest_search", args=["upass", "index"])
response = self.client.post(url, {
"uwnetid": "bill",
"csrfmiddlewaretoken": "0000000"})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
"/restclients/view/upass/MyUWUpass/MyUWUpass.aspx%3Fid=bill")
| [((10, 1, 11, 73), 'django.test.utils.override_settings', 'override_settings', (), '', False, 'from django.test.utils import override_settings\n'), ((18, 14, 18, 67), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((25, 14, 25, 65), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((34, 14, 34, 67), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((43, 14, 43, 73), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((51, 14, 52, 51), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((60, 14, 60, 71), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((67, 14, 67, 75), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((76, 14, 77, 58), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((86, 14, 86, 66), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((96, 14, 96, 66), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n')] |
danagle/boggled | examples/cli-solver/cli_solver.py | 13fea4c31b5dff72093c38d1ad368dec9d44f4d0 | # cli_solver.py
import argparse
import os
from boggled import BoggleBoard, BoggleSolver, BoggleWords
def solve_board(board, words):
solver = BoggleSolver(board, words)
solver.solve()
return solver
def display_board_details(board):
print("Board details:")
print("Columns: ", board.columns)
print("Rows: ", board.rows)
s = '\n'
for pos in board.tiles:
s += ' ' if len(board.tiles[pos]) == 2 else ' '
s += board.tiles[pos]
if (pos % board.columns) == 0:
s += '\n'
print(s)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("letters", type=str,
help="Board letters")
parser.add_argument("dictionary", type=str,
help="The text file containing the dictionary word list.")
parser.add_argument("-m", "--min", type=int,
help="The minimum word size.")
parser.add_argument("-p", "--paths", action="store_true",
help="Include the path followed for each word found.")
args = parser.parse_args()
if os.path.isfile(args.dictionary):
if isinstance(args.min, int):
words = BoggleWords(args.min)
else:
words = BoggleWords()
words.loadFromFile(args.dictionary)
board = BoggleBoard(args.letters)
display_board_details(board)
solved_board = solve_board(board, words)
print('Found:', len(solved_board.found))
if args.paths:
for word in solved_board.found:
print('{} : {}'.format(word, solved_board.found[word]))
else:
print(solved_board.foundWords)
else:
print("Error: Unable to find the dictionary.")
| [((9, 13, 9, 39), 'boggled.BoggleSolver', 'BoggleSolver', ({(9, 26, 9, 31): 'board', (9, 33, 9, 38): 'words'}, {}), '(board, words)', False, 'from boggled import BoggleBoard, BoggleSolver, BoggleWords\n'), ((28, 13, 28, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((40, 7, 40, 38), 'os.path.isfile', 'os.path.isfile', ({(40, 22, 40, 37): 'args.dictionary'}, {}), '(args.dictionary)', False, 'import os\n'), ((47, 16, 47, 41), 'boggled.BoggleBoard', 'BoggleBoard', ({(47, 28, 47, 40): 'args.letters'}, {}), '(args.letters)', False, 'from boggled import BoggleBoard, BoggleSolver, BoggleWords\n'), ((42, 20, 42, 41), 'boggled.BoggleWords', 'BoggleWords', ({(42, 32, 42, 40): 'args.min'}, {}), '(args.min)', False, 'from boggled import BoggleBoard, BoggleSolver, BoggleWords\n'), ((44, 20, 44, 33), 'boggled.BoggleWords', 'BoggleWords', ({}, {}), '()', False, 'from boggled import BoggleBoard, BoggleSolver, BoggleWords\n')] |
gitter-badger/wepy-1 | src/wepy/orchestration/orchestrator.py | 9bc619aeae178ad5d10f658fae2abfd2c7aeb18a | from copy import copy, deepcopy
import sqlite3
from hashlib import md5
import time
import os
import os.path as osp
from base64 import b64encode, b64decode
from zlib import compress, decompress
import itertools as it
import logging
# instead of pickle we use dill, so we can save dynamically defined
# classes
import dill
from wepy.sim_manager import Manager
from wepy.orchestration.configuration import Configuration
from wepy.orchestration.snapshot import SimApparatus, SimSnapshot
from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri
class OrchestratorError(Exception):
""" """
pass
class Orchestrator():
""" """
# we freeze the pickle protocol for making hashes, because we care
# more about stability than efficiency of newer versions
HASH_PICKLE_PROTOCOL = 3
DEFAULT_WORKDIR = Configuration.DEFAULT_WORKDIR
DEFAULT_CONFIG_NAME = Configuration.DEFAULT_CONFIG_NAME
DEFAULT_NARRATION = Configuration.DEFAULT_NARRATION
DEFAULT_MODE = Configuration.DEFAULT_MODE
DEFAULT_CHECKPOINT_FILENAME = "checkpoint.orch.sqlite"
ORCH_FILENAME_TEMPLATE = "{config}{narration}.orch.sqlite"
    # the default way to open up the whole parent database
DEFAULT_ORCHESTRATION_MODE = 'x'
# mode to open the individual kv stores on the parent database
KV_MODE = 'r+'
# default timeout for connecting to a database
SQLITE3_DEFAULT_TIMEOUT = 5
# the fields to return (and their order) as a record for a run
# query
RUN_SELECT_FIELDS = ('last_cycle_idx', 'config_hash')
def __init__(self, orch_path=None,
mode='x',
append_only=False,
):
self._mode = mode
self._append_only = append_only
# handle the path and convert to a proper URI for the database
# given the path and the mode
self._db_uri = gen_uri(orch_path, mode)
        # run table columns: start_hash, end_hash, config_hash, last_cycle_idx
# get a raw connection to the database
self._db = sqlite3.connect(self.db_uri, uri=True,
timeout=self.SQLITE3_DEFAULT_TIMEOUT)
self._closed = False
# set isolation level to autocommit
self._db.isolation_level = None
        # we can use read_uncommitted only in append_only mode (no
        # updates) because you never have to worry about dirty reads
        # since you can't update
        if self.append_only:
            self._db.execute("PRAGMA read_uncommitted=1")
# we make a table for the run data, if it doesn't already
# exist
c = self._db.cursor().execute(self.create_run_table_query)
# initialize or open each of the separate KV-stores (tables in
# the same SQLite3 database)
# change the mode for the KV stores since we already created the database
# metadata: default init walkers, default apparatus, default
# configuration
self.metadata_kv = KV(db_url=self.db_uri,
table='meta',
mode='a',
value_types=None,
append_only=self.append_only)
# snapshots
self.snapshot_kv = KV(db_url=self.db_uri,
table='snapshots',
primary_key='snaphash',
value_name='snapshot',
mode='a',
append_only=self.append_only)
# configurations
self.configuration_kv = KV(db_url=self.db_uri,
table='configurations',
primary_key='config_hash',
value_name='config',
mode='a',
append_only=self.append_only)
@property
def mode(self):
return self._mode
@property
def append_only(self):
return self._append_only
def close(self):
if self._closed == True:
raise IOError("The database connection is already closed")
else:
# close all the connections
self.metadata_kv.close()
self.configuration_kv.close()
self.snapshot_kv.close()
self._db.close()
self._closed = True
@property
def db_uri(self):
return self._db_uri
@property
def orch_path(self):
# if it is not an in-memory database we parse off the path and
# return that
if self.db_uri == SQLITE3_INMEMORY_URI:
return None
else:
# URIs have the following form: protocol:url?query
# destructure the URI
_, tail = self.db_uri.split(':')
if len(tail.split('?')) > 1:
url, _ = tail.split('?')
else:
url = tail
return url
@classmethod
def serialize(cls, snapshot):
"""Serialize a snapshot to a compressed, encoded, pickle string
representation.
        Currently uses the dill module for pickling because the base
        pickle module is inadequate. The result is mostly compatible with
        plain pickle and can often be read natively with it, but that usage
        is not officially supported; use the deserialize method instead.
        The pickle is compressed with the default zlib level and then
        base64 encoded.
        A deepcopy is always performed on the object first so that
        extraneous references to it are avoided, since there is no (AFAIK)
        way to make sure all references to an object are deleted.
        NOTE: if such a way exists it should be used (and tested) to check
        that it still gives stable pickles (i.e. pickles that always hash
        to the same value) while avoiding the overhead of copying large
        objects.
Parameters
----------
snapshot : SimSnapshot object
The snapshot of the simulation you want to serialize.
Returns
-------
serial_str : str
Serialized string of the snapshot object
"""
serial_str = b64encode(
compress(
dill.dumps(
deepcopy(snapshot),
protocol=cls.HASH_PICKLE_PROTOCOL,
recurse=True)
)
)
return serial_str
# core methods for serializing python objects, used for snapshots,
# apparatuses, configurations, and the initial walker list
@classmethod
def deserialize(cls, serial_str):
"""Deserialize an unencoded string snapshot to an object.
Parameters
----------
serial_str : str
Serialized string of the snapshot object
Returns
-------
snapshot : SimSnapshot object
Simulation snapshot object
"""
return dill.loads(decompress(b64decode(serial_str)))
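    @classmethod
    def _example_serialization_round_trip(cls, obj):
        """Editor-added sketch, not part of the wepy API: serialize and
        deserialize are inverses, and hash_snapshot operates on the serialized
        string, so the same serialized value always yields the same hash.
        """
        serial_str = cls.serialize(obj)
        restored = cls.deserialize(serial_str)
        return restored, cls.hash_snapshot(serial_str)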
# defaults getters and setters
def set_default_sim_apparatus(self, sim_apparatus):
# serialize the apparatus and then set it
serial_app = self.serialize(sim_apparatus)
self.metadata_kv['default_sim_apparatus'] = serial_app
def set_default_init_walkers(self, init_walkers):
        # serialize the initial walkers and then set them
serial_walkers = self.serialize(init_walkers)
self.metadata_kv['default_init_walkers'] = serial_walkers
def set_default_configuration(self, configuration):
        # serialize the configuration and then set it
serial_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serial_config)
self.metadata_kv['default_configuration_hash'] = config_hash
self.configuration_kv[config_hash] = serial_config
def set_default_snapshot(self, snapshot):
snaphash = self.add_snapshot(snapshot)
# then save the hash in the metadata
self.metadata_kv['default_snapshot_hash'] = snaphash
return snaphash
def gen_default_snapshot(self):
# generate the snapshot
sim_start_hash = self.gen_start_snapshot(self.get_default_init_walkers())
# then save the hash in the metadata
self.metadata_kv['default_snapshot_hash'] = sim_start_hash
return sim_start_hash
def get_default_sim_apparatus(self):
return self.deserialize(self.metadata_kv['default_sim_apparatus'])
def get_default_init_walkers(self):
return self.deserialize(self.metadata_kv['default_init_walkers'])
def get_default_configuration(self):
config_hash = self.metadata_kv['default_configuration_hash']
return self.get_configuration(config_hash)
def get_default_configuration_hash(self):
return self.metadata_kv['default_configuration_hash']
def get_default_snapshot(self):
start_hash = self.metadata_kv['default_snapshot_hash']
return self.get_snapshot(start_hash)
def get_default_snapshot_hash(self):
return self.metadata_kv['default_snapshot_hash']
@classmethod
def hash_snapshot(cls, serial_str):
"""
Parameters
----------
serial_str :
Returns
-------
"""
return md5(serial_str).hexdigest()
def get_snapshot(self, snapshot_hash):
"""Returns a copy of a snapshot.
Parameters
----------
snapshot_hash :
Returns
-------
"""
return self.deserialize(self.snapshot_kv[snapshot_hash])
def get_configuration(self, config_hash):
"""Returns a copy of a snapshot.
Parameters
----------
config_hash :
Returns
-------
"""
return self.deserialize(self.configuration_kv[config_hash])
@property
def snapshot_hashes(self):
""" """
# iterate over the snapshot kv
return list(self.snapshot_kv.keys())
@property
def configuration_hashes(self):
""" """
# iterate over the snapshot kv
return list(self.configuration_kv.keys())
def add_snapshot(self, snapshot):
"""
Parameters
----------
snapshot :
Returns
-------
"""
# serialize the snapshot using the protocol for doing so
serialized_snapshot = self.serialize(snapshot)
# get the hash of the snapshot
snaphash = self.hash_snapshot(serialized_snapshot)
# check that the hash is not already in the snapshots
if any([True if snaphash == md5 else False for md5 in self.snapshot_hashes]):
# just skip the rest of the function and return the hash
return snaphash
# save the snapshot in the KV store
self.snapshot_kv[snaphash] = serialized_snapshot
return snaphash
def add_serial_snapshot(self, serial_snapshot):
# get the hash of the snapshot
snaphash = self.hash_snapshot(serial_snapshot)
# check that the hash is not already in the snapshots
if any([True if snaphash == md5 else False for md5 in self.snapshot_hashes]):
# just skip the rest of the function and return the hash
return snaphash
# save the snapshot in the KV store
self.snapshot_kv[snaphash] = serial_snapshot
return snaphash
def gen_start_snapshot(self, init_walkers):
"""
Parameters
----------
init_walkers :
Returns
-------
"""
# make a SimSnapshot object using the initial walkers and
start_snapshot = SimSnapshot(init_walkers, self.get_default_sim_apparatus())
# save the snapshot, and generate its hash
sim_start_md5 = self.add_snapshot(start_snapshot)
return sim_start_md5
@property
def default_snapshot_hash(self):
""" """
return self.metadata_kv['default_snapshot_hash']
@property
def default_snapshot(self):
""" """
return self.get_snapshot(self.default_snapshot_hash)
def snapshot_registered(self, snapshot):
"""Check whether a snapshot is already in the database, based on the
hash of it.
This serializes the snapshot so may be slow.
Parameters
----------
snapshot : SimSnapshot object
The snapshot object you want to query for.
Returns
-------
"""
# serialize and hash the snapshot
snaphash = self.hash_snapshot(self.serialize(snapshot))
# then check it
return self.snapshot_hash_registered(snaphash)
def snapshot_hash_registered(self, snapshot_hash):
"""Check whether a snapshot hash is already in the database.
Parameters
----------
snapshot_hash : str
The string hash of the snapshot.
Returns
-------
"""
if any([True if snapshot_hash == h else False for h in self.snapshot_hashes]):
return True
else:
return False
def configuration_hash_registered(self, config_hash):
"""Check whether a snapshot hash is already in the database.
Parameters
----------
snapshot_hash : str
The string hash of the snapshot.
Returns
-------
"""
if any([True if config_hash == h else False for h in self.configuration_hashes]):
return True
else:
return False
### run methods
def add_configuration(self, configuration):
serialized_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serialized_config)
# check that the hash is not already in the snapshots
if any([True if config_hash == md5 else False for md5 in self.configuration_hashes]):
# just skip the rest of the function and return the hash
return config_hash
# save the snapshot in the KV store
self.configuration_kv[config_hash] = serialized_config
return config_hash
def add_serial_configuration(self, serial_configuration):
# get the hash of the configuration
snaphash = self.hash_snapshot(serial_configuration)
# check that the hash is not already in the configurations
if any([True if snaphash == md5 else False for md5 in self.configuration_hashes]):
# just skip the rest of the function and return the hash
return snaphash
# save the configuration in the KV store
self.configuration_kv[snaphash] = serial_configuration
return snaphash
@property
def create_run_table_query(self):
create_run_table_query = """
CREATE TABLE IF NOT EXISTS runs
(start_hash TEXT NOT NULL,
end_hash TEXT NOT NULL,
config_hash NOT NULL,
last_cycle_idx INTEGER NOT NULL,
PRIMARY KEY (start_hash, end_hash))
"""
return create_run_table_query
@property
def add_run_record_query(self):
add_run_row_query = """
INSERT INTO runs (start_hash, end_hash, config_hash, last_cycle_idx)
VALUES (?, ?, ?, ?)
"""
return add_run_row_query
@property
def update_run_record_query(self):
q = """
UPDATE runs
SET config_hash = ?,
last_cycle_idx = ?
WHERE start_hash=? AND end_hash=?
"""
return q
@property
def delete_run_record_query(self):
q = """
DELETE FROM runs
WHERE start_hash=? AND end_hash=?
"""
return q
def _add_run_record(self, start_hash, end_hash, configuration_hash, cycle_idx):
params = (start_hash, end_hash, configuration_hash, cycle_idx)
# do it as a transaction
c = self._db.cursor()
# run the insert
c.execute(self.add_run_record_query, params)
def _delete_run_record(self, start_hash, end_hash):
params = (start_hash, end_hash)
cursor = self._db.cursor()
cursor.execute(self.delete_run_record_query, params)
def _update_run_record(self, start_hash, end_hash, new_config_hash, new_last_cycle_idx):
params = (new_config_hash, new_last_cycle_idx, start_hash, end_hash)
# do it as a transaction
c = self._db.cursor()
# run the update
c.execute(self.update_run_record_query, params)
def register_run(self, start_hash, end_hash, config_hash, cycle_idx):
"""
Parameters
----------
start_hash :
end_hash :
config_hash :
cycle_idx : int
The cycle of the simulation run the checkpoint was generated for.
Returns
-------
"""
# check that the hashes are for snapshots in the orchestrator
# if one is not registered raise an error
if not self.snapshot_hash_registered(start_hash):
raise OrchestratorError(
"snapshot start_hash {} is not registered with the orchestrator".format(
start_hash))
if not self.snapshot_hash_registered(end_hash):
raise OrchestratorError(
"snapshot end_hash {} is not registered with the orchestrator".format(
end_hash))
if not self.configuration_hash_registered(config_hash):
raise OrchestratorError(
"config hash {} is not registered with the orchestrator".format(
config_hash))
        # add the run record linking these hashes to the configuration
self._add_run_record(start_hash, end_hash, config_hash, cycle_idx)
def get_run_records(self):
get_run_record_query = """
SELECT *
FROM runs
""".format(fields=', '.join(self.RUN_SELECT_FIELDS))
cursor = self._db.cursor()
cursor.execute(get_run_record_query)
records = cursor.fetchall()
return records
def get_run_record(self, start_hash, end_hash):
get_run_record_query = """
SELECT {fields}
FROM runs
WHERE start_hash=? AND end_hash=?
""".format(fields=', '.join(self.RUN_SELECT_FIELDS))
params = (start_hash, end_hash)
cursor = self._db.cursor()
cursor.execute(get_run_record_query, params)
record = cursor.fetchone()
return record
def run_last_cycle_idx(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
last_cycle_idx = record[self.RUN_SELECT_FIELDS.index('last_cycle_idx')]
return last_cycle_idx
def run_configuration(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
config_hash = record[self.RUN_SELECT_FIELDS.index('config_hash')]
# get the configuration object and deserialize it
return self.deserialize(self.configuration_kv[config_hash])
def run_configuration_hash(self, start_hash, end_hash):
record = self.get_run_record(start_hash, end_hash)
config_hash = record[self.RUN_SELECT_FIELDS.index('config_hash')]
return config_hash
def run_hashes(self):
return [(rec[0], rec[1]) for rec in self.get_run_records()]
def run_continues(self, start_hash, end_hash):
"""Given a start hash and end hash for a run, find the run that this
continues.
Parameters
----------
start_hash :
end_hash :
Returns
-------
        (start_hash, end_hash) of the run this one continues, or None
"""
# loop through the runs in this orchestrator until we find one
# where the start_hash matches the end hash
runs = self.run_hashes()
run_idx = 0
while True:
run_start_hash, run_end_hash = runs[run_idx]
# if the start hash of the queried run is the same as the
# end hash for this run we have found it
if start_hash == run_end_hash:
return (run_start_hash, run_end_hash)
run_idx += 1
# if the index is over the number of runs we quit and
# return None as no match
if run_idx >= len(runs):
return None
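# NOTE: this is a linear scan over all registered runs; it returns the
# (start_hash, end_hash) pair of the run whose end hash equals the queried
# start_hash, or None when no such predecessor run exists.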
def _init_checkpoint_db(self, start_hash, configuration, checkpoint_dir, mode='x'):
logging.debug("Initializing checkpoint orch database")
# make the checkpoint with the default filename at the checkpoint directory
checkpoint_path = osp.join(checkpoint_dir, self.DEFAULT_CHECKPOINT_FILENAME)
# create a new database in the mode specified
logging.debug("Creating checkpoint database")
checkpoint_orch = Orchestrator(checkpoint_path, mode=mode)
# add the starting snapshot, bypassing the serialization stuff
logging.debug("Setting the starting snapshot")
checkpoint_orch.snapshot_kv[start_hash] = self.snapshot_kv[start_hash]
# if we have a new configuration at runtime serialize and
# hash it
serialized_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serialized_config)
# save the configuration as well
checkpoint_orch.configuration_kv[config_hash] = serialized_config
checkpoint_orch.close()
logging.debug("closing connection to checkpoint database")
return checkpoint_path, config_hash
def _save_checkpoint(self, checkpoint_snapshot, config_hash,
checkpoint_db_path, cycle_idx,
):
"""
Parameters
----------
checkpoint_snapshot :
config_hash :
checkpoint_db_path :
cycle_idx :
Returns
-------
"""
# orchestrator wrapper to the db
logging.debug("Opening the checkpoint orch database")
checkpoint_orch = Orchestrator(checkpoint_db_path, mode='r+')
# connection to the db
cursor = checkpoint_orch._db.cursor()
# we replicate the code for adding the snapshot here because
# we want it to occur transactionally the delete and add
# serialize the snapshot using the protocol for doing so
serialized_snapshot = self.serialize(checkpoint_snapshot)
# get the hash of the snapshot
snaphash = self.hash_snapshot(serialized_snapshot)
# the queries for deleting and inserting the new run record
delete_query = """
DELETE FROM runs
WHERE start_hash=?
AND end_hash=?
"""
insert_query = """
INSERT INTO runs (start_hash, end_hash, config_hash, last_cycle_idx)
VALUES (?, ?, ?, ?)
"""
# if there are any runs in the checkpoint orch remove the
# final snapshot
delete_params = None
if len(checkpoint_orch.run_hashes()) > 0:
start_hash, old_checkpoint_hash = checkpoint_orch.run_hashes()[0]
delete_params = (start_hash, old_checkpoint_hash)
else:
start_hash = list(checkpoint_orch.snapshot_kv.keys())[0]
# the config should already be in the orchestrator db
insert_params = (start_hash, snaphash, config_hash, cycle_idx)
# start this whole process as a transaction so we don't get
# something weird in between
logging.debug("Starting transaction for updating run table in checkpoint")
cursor.execute("BEGIN TRANSACTION")
# add the new one, using a special method for setting inside
# of a transaction
logging.debug("setting the new checkpoint snapshot into the KV")
cursor = checkpoint_orch.snapshot_kv.set_in_tx(cursor, snaphash, serialized_snapshot)
logging.debug("finished")
# if we need to delete the old end of the run snapshot and the
# run record for it
if delete_params is not None:
logging.debug("Old run record needs to be removed")
# remove the old run from the run table
logging.debug("Deleting the old run record")
cursor.execute(delete_query, delete_params)
logging.debug("finished")
# register the new run in the run table
logging.debug("Inserting the new run record")
cursor.execute(insert_query, insert_params)
logging.debug("finished")
# end the transaction
logging.debug("Finishing transaction")
cursor.execute("COMMIT")
logging.debug("Transaction committed")
# we do the removal of the old snapshot outside of the
# transaction since it is slow and can cause timeouts to
# occur. Furthermore, it is okay if it is in the checkpoint as
# the run record is what matters as long as the new checkpoint
# is there.
# delete the old snapshot if we need to
if delete_params is not None:
# WARN: occasionally and for unknown reasons we have found
# that the final checkpoint hash is the same as the one
# before. (The case where the last snapshot is on the same
# cycle as a backup is already covered). So as a last
# resort, we check that they don't have the same hash. If
# they do we don't delete it!
if snaphash != old_checkpoint_hash:
logging.debug("Deleting the old snapshot")
del checkpoint_orch.snapshot_kv[old_checkpoint_hash]
logging.debug("finished")
else:
logging.warn("Final snapshot has same hash as the previous checkpoint. Not deleting the previous one.")
checkpoint_orch.close()
logging.debug("closed the checkpoint orch connection")
@staticmethod
def gen_sim_manager(start_snapshot, configuration):
"""
Parameters
----------
start_snapshot :
configuration :
Returns
-------
"""
# construct the sim manager, in a wepy specific way
sim_manager = Manager(start_snapshot.walkers,
runner=start_snapshot.apparatus.filters[0],
boundary_conditions=start_snapshot.apparatus.filters[1],
resampler=start_snapshot.apparatus.filters[2],
# configuration options
work_mapper=configuration.work_mapper,
reporters=configuration.reporters,
sim_monitor=configuration.monitor,
)
return sim_manager
def run_snapshot_by_time(self, start_hash, run_time, n_steps,
checkpoint_freq=None,
checkpoint_dir=None,
configuration=None,
configuration_hash=None,
checkpoint_mode='x'):
"""For a finished run continue it but resetting all the state of the
resampler and boundary conditions
Parameters
----------
start_hash :
run_time :
n_steps :
checkpoint_freq :
(Default value = None)
checkpoint_dir :
(Default value = None)
configuration :
(Default value = None)
configuration_hash :
(Default value = None)
checkpoint_mode :
(Default value = 'x')
Returns
-------
"""
# you must have a checkpoint dir if you ask for a checkpoint
# frequency
if checkpoint_freq is not None and checkpoint_dir is None:
raise ValueError("Must provide a directory for the checkpoint file "
"is a frequency is specified")
if configuration_hash is not None and configuration is not None:
raise ValueError("Cannot specify both a hash of an existing configuration"
"and provide a runtime configuration")
# if no configuration was specified we use the default one, otherwise
elif (configuration is None) and (configuration_hash is None):
configuration = self.get_default_configuration()
# if a configuration hash was given only then we retrieve that
# configuration since we must pass configurations to the
# checkpoint DB initialization
elif configuration_hash is not None:
configuration = self.configuration_kv[configuration_hash]
# check that the directory for checkpoints exists, and create
# it if it doesn't and isn't already created
if checkpoint_dir is not None:
checkpoint_dir = osp.realpath(checkpoint_dir)
os.makedirs(checkpoint_dir, exist_ok=True)
# if the checkpoint dir is not specified don't create a
# checkpoint db orch
checkpoint_db_path = None
if checkpoint_dir is not None:
logging.debug("Initialization of checkpoint database is requested")
checkpoint_db_path, configuration_hash = self._init_checkpoint_db(start_hash,
configuration,
checkpoint_dir,
mode=checkpoint_mode)
logging.debug("finished initializing checkpoint database")
# get the snapshot and the configuration to use for the sim_manager
start_snapshot = self.get_snapshot(start_hash)
# generate the simulation manager given the snapshot and the
# configuration
sim_manager = self.gen_sim_manager(start_snapshot, configuration)
# handle and process the optional arguments for running simulation
if 'runner' in configuration.apparatus_opts:
runner_opts = configuration.apparatus_opts['runner']
else:
runner_opts = None
# run the init subroutine for the simulation manager
logging.debug("Running sim_manager.init")
sim_manager.init()
# run each cycle manually creating checkpoints when necessary
logging.debug("Starting run loop")
walkers = sim_manager.init_walkers
cycle_idx = 0
start_time = time.time()
while time.time() - start_time < run_time:
logging.debug("Running cycle {}".format(cycle_idx))
# run the cycle
walkers, filters = sim_manager.run_cycle(
walkers,
n_steps,
cycle_idx,
runner_opts=runner_opts,
)
# check to see if a checkpoint is necessary
if (checkpoint_freq is not None):
if (cycle_idx % checkpoint_freq == 0):
logging.debug("Checkpoint is required for this cycle")
# make the checkpoint snapshot
logging.debug("Generating the simulation snapshot")
checkpoint_snapshot = SimSnapshot(walkers, SimApparatus(filters))
# save the checkpoint (however that is implemented)
logging.debug("saving the checkpoint to the database")
self._save_checkpoint(checkpoint_snapshot,
configuration_hash,
checkpoint_db_path,
cycle_idx)
logging.debug("finished saving the checkpoint to the database")
# increase the cycle index for the next cycle
cycle_idx += 1
logging.debug("Finished the run cycle")
# the cycle index was set for the next cycle which didn't run
# so we decrement it
last_cycle_idx = cycle_idx - 1
logging.debug("Running sim_manager.cleanup")
# run the cleanup subroutine
sim_manager.cleanup()
# run the segment given the sim manager and run parameters
end_snapshot = SimSnapshot(walkers, SimApparatus(filters))
logging.debug("Run finished")
# return the things necessary for saving to the checkpoint if
# that is what is wanted later on
return end_snapshot, configuration_hash, checkpoint_db_path, last_cycle_idx
def orchestrate_snapshot_run_by_time(self, snapshot_hash, run_time, n_steps,
checkpoint_freq=None,
checkpoint_dir=None,
orchestrator_path=None,
configuration=None,
# these can reparametrize the paths
# for both the orchestrator produced
# files as well as the configuration
work_dir=None,
config_name=None,
narration=None,
mode=None,
# extra kwargs will be passed to the
# configuration.reparametrize method
**kwargs):
"""
Parameters
----------
snapshot_hash :
run_time :
n_steps :
checkpoint_freq :
(Default value = None)
checkpoint_dir :
(Default value = None)
orchestrator_path :
(Default value = None)
configuration :
(Default value = None)
work_dir :
(Default value = None)
config_name :
(Default value = None)
narration :
(Default value = None)
mode :
(Default value = None)
**kwargs :
Returns
-------
"""
# the orchestrator mode is used for pickling the orchestrator and so
# must be in bytes mode; set it unconditionally so that it is defined
# further down even when an explicit file mode is given
orch_mode = self.DEFAULT_ORCHESTRATION_MODE
# there are two possible uses for the path reparametrizations:
# the configuration and the orchestrator file paths. If both
# of those are explicitly specified by passing in the whole
# configuration object or both of checkpoint_dir,
# orchestrator_path then those reparametrization kwargs will
# not be used. As this is likely not the intention of the user
# we will raise an error. If there is even one use for them no
# error will be raised.
# first check if any reparametrizations were even requested
parametrizations_requested = (True if work_dir is not None else False,
True if config_name is not None else False,
True if narration is not None else False,
True if mode is not None else False,)
# check if there are any available targets for reparametrization
reparametrization_targets = (True if configuration is None else False,
True if checkpoint_dir is None else False,
True if orchestrator_path is None else False)
# if parametrizations were requested and there are no targets
# we need to raise an error
if any(parametrizations_requested) and not any(reparametrization_targets):
raise OrchestratorError("Reparametrizations were requested but none are possible,"
" due to all possible targets being already explicitly given")
# if any paths were not given and no defaults for path
# parameters we want to fill in the defaults for them. This
# will also fill in any missing parametrizations with defaults
# we do this by just setting the path parameters if they
# aren't set, then later the parametrization targets will be
# tested for if they have been set or not, and if they haven't
# then these will be used to generate paths for them.
if work_dir is None:
work_dir = self.DEFAULT_WORKDIR
if config_name is None:
config_name = self.DEFAULT_CONFIG_NAME
if narration is None:
narration = self.DEFAULT_NARRATION
if mode is None:
mode = self.DEFAULT_MODE
# if no configuration was specified use the default one
if configuration is None:
configuration = self.get_default_configuration()
# reparametrize the configuration with the given path
# parameters and anything else in kwargs. If they are none
# this will have no effect anyhow
logging.debug("Reparametrizing the configuration")
configuration = configuration.reparametrize(work_dir=work_dir,
config_name=config_name,
narration=narration,
mode=mode,
**kwargs)
# make parametric paths for the checkpoint directory and the
# orchestrator pickle to be made, unless they are explicitly given
if checkpoint_dir is None:
# the checkpoint directory will be in the work dir
logging.debug("checkpoint directory defaulted to the work_dir")
checkpoint_dir = work_dir
logging.debug("In the orchestrate run, calling to run_snapshot by time")
# then actually run the simulation with checkpointing. This
# returns the end snapshot and doesn't write out anything to
# orchestrators other than the checkpointing
(end_snapshot, configuration_hash, checkpoint_db_path, last_cycle_idx) =\
self.run_snapshot_by_time(snapshot_hash, run_time, n_steps,
checkpoint_freq=checkpoint_freq,
checkpoint_dir=checkpoint_dir,
configuration=configuration,
checkpoint_mode=orch_mode)
logging.debug("Finished running snapshot by time")
# if the last cycle in the run was a checkpoint skip this step
# of saving a checkpoint
do_final_checkpoint = True
# make sure the checkpoint_freq is defined before testing it
if checkpoint_freq is not None:
if last_cycle_idx % checkpoint_freq == 0:
logging.debug("Last cycle saved a checkpoint, no need to save one")
do_final_checkpoint = False
if do_final_checkpoint:
logging.debug("Saving a final checkpoint for the end of the run")
# now that it is finished we save the final snapshot to the
# checkpoint file. This is done transactionally using the
# SQLite transaction functionality (either succeeds or doesn't
# happen) that way we don't have to worry about data integrity
# loss. Here we also don't have to worry about other processes
# interacting with the checkpoint which makes it isolated.
self._save_checkpoint(end_snapshot, configuration_hash,
checkpoint_db_path, last_cycle_idx)
logging.debug("Finished saving the final checkpoint for the run")
# then return the final orchestrator
logging.debug("Getting a connection to that orch to retun")
checkpoint_orch = Orchestrator(checkpoint_db_path,
mode='r+',
append_only=True)
return checkpoint_orch
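# A rough usage sketch (names and values below are hypothetical, not taken
# from this file): run a registered snapshot for one hour of wall time with
# 1000 dynamics steps per cycle and a checkpoint every 10 cycles:
#   end_orch = orch.orchestrate_snapshot_run_by_time(snap_hash, 3600, 1000,
#                                                    checkpoint_freq=10,
#                                                    work_dir='runs')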
def reconcile_orchestrators(host_path, *orchestrator_paths):
"""
Parameters
----------
host_path :
*orchestrator_paths :
Returns
-------
"""
if not osp.exists(host_path):
assert len(orchestrator_paths) > 1, \
"If the host path is a new orchestrator, must give at least 2 orchestrators to merge."
# open the host orchestrator at the location which will have all
# of the new things put into it from the other orchestrators. If
# it doesn't already exist it will be created otherwise open
# read-write.
new_orch = Orchestrator(orch_path=host_path,
mode='a',
append_only=True)
# TODO deprecate, if there are no defaults we can't set them since
# the mode is append only, we don't really care about these so
# don't set them, otherwise do some mode logic to figure this out
# and open in write mode and set defaults, then change to append
# only
# # if this is an existing orchestrator copy the default
# # sim_apparatus and init_walkers
# try:
# default_app = new_orch.get_default_sim_apparatus()
# except KeyError:
# # no default apparatus, that is okay
# pass
# else:
# # set it
# new_orch.set_default_sim_apparatus(default_app)
# # same for the initial walkers
# try:
# default_walkers = new_orch.get_default_init_walkers()
# except KeyError:
# # no default apparatus, that is okay
# pass
# else:
# # set it
# new_orch.set_default_sim_apparatus(default_walkers)
for orch_path in orchestrator_paths:
# open it in read-write fail if doesn't exist
orch = Orchestrator(orch_path=orch_path,
mode='r+',
append_only=True)
# add in all snapshots from each orchestrator, by the hash not the
# snapshots themselves, we trust they are correct
for snaphash in orch.snapshot_hashes:
# check that the hash is not already in the snapshots
if any([True if snaphash == md5 else False for md5 in new_orch.snapshot_hashes]):
# skip it and move on
continue
# if it is not copy it over without deserializing
new_orch.snapshot_kv[snaphash] = orch.snapshot_kv[snaphash]
# add in the configurations for the runs from each
# orchestrator, by the hash not the snapshots themselves, we
# trust they are correct
for run_id in orch.run_hashes():
config_hash = orch.run_configuration_hash(*run_id)
# check that the hash is not already in the snapshots
if any([True if config_hash == md5 else False for md5 in new_orch.configuration_hashes]):
# skip it and move on
continue
# if it is not set it
new_orch.configuration_kv[config_hash] = orch.configuration_kv[config_hash]
# concatenate the run table with an SQL union from an attached
# database
attached_table_name = "other"
# query to attach the foreign database
attach_query = """
ATTACH '{}' AS {}
""".format(orch_path, attached_table_name)
# query to update the runs table with new unique runs
union_query = """
INSERT INTO runs
SELECT * FROM (
SELECT * FROM {}.runs
EXCEPT
SELECT * FROM runs
)
""".format(attached_table_name)
# query to detach the table
detach_query = """
DETACH {}
""".format(attached_table_name)
# then run the queries
cursor = new_orch._db.cursor()
try:
cursor.execute('BEGIN TRANSACTION')
cursor.execute(attach_query)
cursor.execute(union_query)
cursor.execute('COMMIT')
cursor.execute(detach_query)
except:
cursor.execute('COMMIT')
import pdb; pdb.set_trace()
cursor.execute("SELECT * FROM (SELECT * FROM other.runs EXCEPT SELECT * FROM runs)")
recs = cursor.fetchall()
return new_orch
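# A rough usage sketch (paths are hypothetical): merge the run records,
# snapshots and configurations of two segment orchestrators into one host
# database:
#   merged = reconcile_orchestrators('host.orch.sqlite3',
#                                    'segment1.orch.sqlite3',
#                                    'segment2.orch.sqlite3')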
| [((67, 23, 67, 47), 'wepy.util.kv.gen_uri', 'gen_uri', ({(67, 31, 67, 40): 'orch_path', (67, 42, 67, 46): 'mode'}, {}), '(orch_path, mode)', False, 'from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri\n'), ((72, 19, 73, 72), 'sqlite3.connect', 'sqlite3.connect', (), '', False, 'import sqlite3\n'), ((97, 27, 101, 59), 'wepy.util.kv.KV', 'KV', (), '', False, 'from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri\n'), ((106, 27, 111, 59), 'wepy.util.kv.KV', 'KV', (), '', False, 'from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri\n'), ((114, 32, 119, 64), 'wepy.util.kv.KV', 'KV', (), '', False, 'from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri\n'), ((765, 8, 765, 62), 'logging.debug', 'logging.debug', ({(765, 22, 765, 61): '"""Initializing checkpoint orch database"""'}, {}), "('Initializing checkpoint orch database')", False, 'import logging\n'), ((768, 26, 768, 84), 'os.path.join', 'osp.join', ({(768, 35, 768, 49): 'checkpoint_dir', (768, 51, 768, 83): 'self.DEFAULT_CHECKPOINT_FILENAME'}, {}), '(checkpoint_dir, self.DEFAULT_CHECKPOINT_FILENAME)', True, 'import os.path as osp\n'), ((771, 8, 771, 53), 'logging.debug', 'logging.debug', ({(771, 22, 771, 52): '"""Creating checkpoint database"""'}, {}), "('Creating checkpoint database')", False, 'import logging\n'), ((775, 8, 775, 54), 'logging.debug', 'logging.debug', ({(775, 22, 775, 53): '"""Setting the starting snapshot"""'}, {}), "('Setting the starting snapshot')", False, 'import logging\n'), ((787, 8, 787, 66), 'logging.debug', 'logging.debug', ({(787, 22, 787, 65): '"""closing connection to checkpoint database"""'}, {}), "('closing connection to checkpoint database')", False, 'import logging\n'), ((814, 8, 814, 61), 'logging.debug', 'logging.debug', ({(814, 22, 814, 60): '"""Opening the checkpoint orch database"""'}, {}), "('Opening the checkpoint orch database')", False, 'import logging\n'), ((857, 8, 857, 82), 'logging.debug', 'logging.debug', ({(857, 22, 857, 81): '"""Starting transaction for updating run table in checkpoint"""'}, {}), "('Starting transaction for updating run table in checkpoint')", False, 'import logging\n'), ((862, 8, 862, 72), 'logging.debug', 'logging.debug', ({(862, 22, 862, 71): '"""setting the new checkpoint snapshot into the KV"""'}, {}), "('setting the new checkpoint snapshot into the KV')", False, 'import logging\n'), ((864, 8, 864, 33), 'logging.debug', 'logging.debug', ({(864, 22, 864, 32): '"""finished"""'}, {}), "('finished')", False, 'import logging\n'), ((878, 8, 878, 53), 'logging.debug', 'logging.debug', ({(878, 22, 878, 52): '"""Inserting the new run record"""'}, {}), "('Inserting the new run record')", False, 'import logging\n'), ((880, 8, 880, 33), 'logging.debug', 'logging.debug', ({(880, 22, 880, 32): '"""finished"""'}, {}), "('finished')", False, 'import logging\n'), ((883, 8, 883, 46), 'logging.debug', 'logging.debug', ({(883, 22, 883, 45): '"""Finishing transaction"""'}, {}), "('Finishing transaction')", False, 'import logging\n'), ((885, 8, 885, 46), 'logging.debug', 'logging.debug', ({(885, 22, 885, 45): '"""Transaction committed"""'}, {}), "('Transaction committed')", False, 'import logging\n'), ((911, 8, 911, 62), 'logging.debug', 'logging.debug', ({(911, 22, 911, 61): '"""closed the checkpoint orch connection"""'}, {}), "('closed the checkpoint orch connection')", False, 'import logging\n'), ((931, 22, 939, 9), 'wepy.sim_manager.Manager', 'Manager', (), '', False, 'from wepy.sim_manager import Manager\n'), ((1033, 8, 1033, 49), 'logging.debug', 
'logging.debug', ({(1033, 22, 1033, 48): '"""Running sim_manager.init"""'}, {}), "('Running sim_manager.init')", False, 'import logging\n'), ((1037, 8, 1037, 42), 'logging.debug', 'logging.debug', ({(1037, 22, 1037, 41): '"""Starting run loop"""'}, {}), "('Starting run loop')", False, 'import logging\n'), ((1040, 21, 1040, 32), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1072, 8, 1072, 47), 'logging.debug', 'logging.debug', ({(1072, 22, 1072, 46): '"""Finished the run cycle"""'}, {}), "('Finished the run cycle')", False, 'import logging\n'), ((1078, 8, 1078, 52), 'logging.debug', 'logging.debug', ({(1078, 22, 1078, 51): '"""Running sim_manager.cleanup"""'}, {}), "('Running sim_manager.cleanup')", False, 'import logging\n'), ((1085, 8, 1085, 37), 'logging.debug', 'logging.debug', ({(1085, 22, 1085, 36): '"""Run finished"""'}, {}), "('Run finished')", False, 'import logging\n'), ((1216, 8, 1216, 80), 'logging.debug', 'logging.debug', ({(1216, 22, 1216, 79): '"""In the orchestrate run, calling to run_snapshot by time"""'}, {}), "('In the orchestrate run, calling to run_snapshot by time')", False, 'import logging\n'), ((1227, 8, 1227, 58), 'logging.debug', 'logging.debug', ({(1227, 22, 1227, 57): '"""Finished running snapshot by time"""'}, {}), "('Finished running snapshot by time')", False, 'import logging\n'), ((1253, 8, 1253, 67), 'logging.debug', 'logging.debug', ({(1253, 22, 1253, 66): '"""Getting a connection to that orch to retun"""'}, {}), "('Getting a connection to that orch to retun')", False, 'import logging\n'), ((1278, 11, 1278, 32), 'os.path.exists', 'osp.exists', ({(1278, 22, 1278, 31): 'host_path'}, {}), '(host_path)', True, 'import os.path as osp\n'), ((870, 12, 870, 63), 'logging.debug', 'logging.debug', ({(870, 26, 870, 62): '"""Old run record needs to be removed"""'}, {}), "('Old run record needs to be removed')", False, 'import logging\n'), ((873, 12, 873, 56), 'logging.debug', 'logging.debug', ({(873, 26, 873, 55): '"""Deleting the old run record"""'}, {}), "('Deleting the old run record')", False, 'import logging\n'), ((875, 12, 875, 37), 'logging.debug', 'logging.debug', ({(875, 26, 875, 36): '"""finished"""'}, {}), "('finished')", False, 'import logging\n'), ((1003, 29, 1003, 57), 'os.path.realpath', 'osp.realpath', ({(1003, 42, 1003, 56): 'checkpoint_dir'}, {}), '(checkpoint_dir)', True, 'import os.path as osp\n'), ((1004, 12, 1004, 54), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((1011, 12, 1011, 79), 'logging.debug', 'logging.debug', ({(1011, 26, 1011, 78): '"""Initialization of checkpoint database is requested"""'}, {}), "('Initialization of checkpoint database is requested')", False, 'import logging\n'), ((1016, 12, 1016, 70), 'logging.debug', 'logging.debug', ({(1016, 26, 1016, 69): '"""finished initializing checkpoint database"""'}, {}), "('finished initializing checkpoint database')", False, 'import logging\n'), ((1083, 44, 1083, 65), 'wepy.orchestration.snapshot.SimApparatus', 'SimApparatus', ({(1083, 57, 1083, 64): 'filters'}, {}), '(filters)', False, 'from wepy.orchestration.snapshot import SimApparatus, SimSnapshot\n'), ((1199, 12, 1199, 62), 'logging.debug', 'logging.debug', ({(1199, 26, 1199, 61): '"""Reparametrizing the configuration"""'}, {}), "('Reparametrizing the configuration')", False, 'import logging\n'), ((1212, 12, 1212, 75), 'logging.debug', 'logging.debug', ({(1212, 26, 1212, 74): '"""checkpoint directory defaulted to the work_dir"""'}, {}), "('checkpoint directory defaulted to the work_dir')", False, 
'import logging\n'), ((1241, 12, 1241, 77), 'logging.debug', 'logging.debug', ({(1241, 26, 1241, 76): '"""Saving a final checkpoint for the end of the run"""'}, {}), "('Saving a final checkpoint for the end of the run')", False, 'import logging\n'), ((1250, 12, 1250, 77), 'logging.debug', 'logging.debug', ({(1250, 26, 1250, 76): '"""Finished saving the final checkpoint for the run"""'}, {}), "('Finished saving the final checkpoint for the run')", False, 'import logging\n'), ((233, 37, 233, 58), 'base64.b64decode', 'b64decode', ({(233, 47, 233, 57): 'serial_str'}, {}), '(serial_str)', False, 'from base64 import b64encode, b64decode\n'), ((325, 15, 325, 30), 'hashlib.md5', 'md5', ({(325, 19, 325, 29): 'serial_str'}, {}), '(serial_str)', False, 'from hashlib import md5\n'), ((903, 16, 903, 58), 'logging.debug', 'logging.debug', ({(903, 30, 903, 57): '"""Deleting the old snapshot"""'}, {}), "('Deleting the old snapshot')", False, 'import logging\n'), ((905, 16, 905, 41), 'logging.debug', 'logging.debug', ({(905, 30, 905, 40): '"""finished"""'}, {}), "('finished')", False, 'import logging\n'), ((907, 16, 907, 119), 'logging.warn', 'logging.warn', ({(907, 29, 907, 118): '"""Final snapshot has same hash as the previous checkpoint. Not deleting the previous one."""'}, {}), "(\n 'Final snapshot has same hash as the previous checkpoint. Not deleting the previous one.'\n )", False, 'import logging\n'), ((1041, 14, 1041, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1236, 16, 1236, 83), 'logging.debug', 'logging.debug', ({(1236, 30, 1236, 82): '"""Last cycle saved a checkpoint, no need to save one"""'}, {}), "('Last cycle saved a checkpoint, no need to save one')", False, 'import logging\n'), ((1390, 24, 1390, 39), 'pdb.set_trace', 'pdb.set_trace', ({}, {}), '()', False, 'import pdb\n'), ((205, 32, 205, 50), 'copy.deepcopy', 'deepcopy', ({(205, 41, 205, 49): 'snapshot'}, {}), '(snapshot)', False, 'from copy import copy, deepcopy\n'), ((1055, 20, 1055, 74), 'logging.debug', 'logging.debug', ({(1055, 34, 1055, 73): '"""Checkpoint is required for this cycle"""'}, {}), "('Checkpoint is required for this cycle')", False, 'import logging\n'), ((1058, 20, 1058, 71), 'logging.debug', 'logging.debug', ({(1058, 34, 1058, 70): '"""Generating the simulation snapshot"""'}, {}), "('Generating the simulation snapshot')", False, 'import logging\n'), ((1062, 20, 1062, 74), 'logging.debug', 'logging.debug', ({(1062, 34, 1062, 73): '"""saving the checkpoint to the database"""'}, {}), "('saving the checkpoint to the database')", False, 'import logging\n'), ((1067, 20, 1067, 83), 'logging.debug', 'logging.debug', ({(1067, 34, 1067, 82): '"""finished saving the checkpoint to the database"""'}, {}), "('finished saving the checkpoint to the database')", False, 'import logging\n'), ((1059, 63, 1059, 84), 'wepy.orchestration.snapshot.SimApparatus', 'SimApparatus', ({(1059, 76, 1059, 83): 'filters'}, {}), '(filters)', False, 'from wepy.orchestration.snapshot import SimApparatus, SimSnapshot\n')] |
HesterLim/pytorch-cnn-visualizations | src/generate_class_specific_samples.py | 59ddf0ef6ea2c9d4d69c1ac6b260cb399867d178 | """
Created on Thu Oct 26 14:19:44 2017
@author: Utku Ozbulak - github.com/utkuozbulak
"""
import os
import numpy as np
import torch
from torch.optim import SGD
from torchvision import models
from misc_functions import preprocess_image, recreate_image, save_image
class ClassSpecificImageGeneration():
"""
Produces an image that maximizes a certain class with gradient ascent
"""
def __init__(self, model, target_class):
self.mean = [-0.485, -0.456, -0.406]
self.std = [1/0.229, 1/0.224, 1/0.225]
self.model = model
self.model.eval()
self.target_class = target_class
# Generate a random image
self.created_image = np.uint8(np.random.uniform(0, 255, (224, 224, 3)))
# Create the folder to export images if not exists
if not os.path.exists('../generated/class_'+str(self.target_class)):
os.makedirs('../generated/class_'+str(self.target_class))
def generate(self, iterations=150):
"""Generates class specific image
Keyword Arguments:
iterations {int} -- Total iterations for gradient ascent (default: {150})
Returns:
np.ndarray -- Final maximally activated class image
"""
initial_learning_rate = 6
for i in range(1, iterations):
# Process image and return variable
self.processed_image = preprocess_image(self.created_image, False)
# Define optimizer for the image
optimizer = SGD([self.processed_image], lr=initial_learning_rate)
# Forward
output = self.model(self.processed_image)
# Target specific class
class_loss = -output[0, self.target_class]
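# Minimizing the negative pre-softmax score of the target class with SGD
# is equivalent to gradient ascent on that class's raw logit.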
if i % 10 == 0 or i == iterations-1:
print('Iteration:', str(i), 'Loss',
"{0:.2f}".format(class_loss.data.numpy()))
# Zero grads
self.model.zero_grad()
# Backward
class_loss.backward()
# Update image
optimizer.step()
# Recreate image
self.created_image = recreate_image(self.processed_image)
if i % 10 == 0 or i == iterations-1:
# Save image
im_path = '../generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'
save_image(self.created_image, im_path)
return self.processed_image
if __name__ == '__main__':
target_class = 130 # Flamingo
pretrained_model = models.alexnet(pretrained=True)
csig = ClassSpecificImageGeneration(pretrained_model, target_class)
csig.generate()
| [((74, 23, 74, 54), 'torchvision.models.alexnet', 'models.alexnet', (), '', False, 'from torchvision import models\n'), ((27, 38, 27, 78), 'numpy.random.uniform', 'np.random.uniform', ({(27, 56, 27, 57): '0', (27, 59, 27, 62): '255', (27, 64, 27, 77): '(224, 224, 3)'}, {}), '(0, 255, (224, 224, 3))', True, 'import numpy as np\n'), ((44, 35, 44, 78), 'misc_functions.preprocess_image', 'preprocess_image', ({(44, 52, 44, 70): 'self.created_image', (44, 72, 44, 77): 'False'}, {}), '(self.created_image, False)', False, 'from misc_functions import preprocess_image, recreate_image, save_image\n'), ((47, 24, 47, 77), 'torch.optim.SGD', 'SGD', (), '', False, 'from torch.optim import SGD\n'), ((63, 33, 63, 69), 'misc_functions.recreate_image', 'recreate_image', ({(63, 48, 63, 68): 'self.processed_image'}, {}), '(self.processed_image)', False, 'from misc_functions import preprocess_image, recreate_image, save_image\n'), ((67, 16, 67, 55), 'misc_functions.save_image', 'save_image', ({(67, 27, 67, 45): 'self.created_image', (67, 47, 67, 54): 'im_path'}, {}), '(self.created_image, im_path)', False, 'from misc_functions import preprocess_image, recreate_image, save_image\n')] |
iltempe/osmosi | sumo/tools/net/visum_mapDistricts.py | c0f54ecdbb7c7b5602d587768617d0dc50f1d75d | #!/usr/bin/env python
"""
@file visum_mapDistricts.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-10-25
@version $Id$
This script reads a network and a dump file and
draws the network, coloring it by the values
found within the dump-file.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import math
from optparse import OptionParser
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net
import netshiftadaptor
def computeDistance(n1, n2):
xd = n1._coord[0] - n2._coord[0]
yd = n1._coord[1] - n2._coord[1]
return math.sqrt(xd * xd + yd * yd)
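# relAngle normalizes the difference (angle2 - angle1), given in degrees,
# into the range [-180, 180].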
def relAngle(angle1, angle2):
angle2 -= angle1
if angle2 > 180:
angle2 = (360. - angle2) * -1.
while angle2 < -180:
angle2 = 360 + angle2
return angle2
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-1", "--net1", dest="net1",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-2", "--net2", dest="net2",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-a", "--nodes1", dest="nodes1",
help="The first matching nodes", metavar="NODELIST")
optParser.add_option("-b", "--nodes2", dest="nodes2",
help="The second matching nodes", metavar="NODELIST")
# parse options
(options, args) = optParser.parse_args()
# read networks
if options.verbose:
print("Reading net#1...")
net1 = sumolib.net.readNet(options.net1)
if options.verbose:
print("Reading net#2...")
net2 = sumolib.net.readNet(options.net2)
# reproject the visum net onto the navteq net
adaptor = netshiftadaptor.NetShiftAdaptor(
net1, net2, options.nodes1.split(","), options.nodes2.split(","))
adaptor.reproject(options.verbose)
# build a speed-up grid
xmin = 100000
xmax = -100000
ymin = 100000
ymax = -100000
for n in net1._nodes:
xmin = min(xmin, n._coord[0])
xmax = max(xmax, n._coord[0])
ymin = min(ymin, n._coord[1])
ymax = max(ymax, n._coord[1])
for n in net2._nodes:
xmin = min(xmin, n._coord[0])
xmax = max(xmax, n._coord[0])
ymin = min(ymin, n._coord[1])
ymax = max(ymax, n._coord[1])
xmin = xmin - .1
xmax = xmax + .1
ymin = ymin - .1
ymax = ymax + .1
CELLSIZE = 100
arr1 = []
arr2 = []
for y in range(0, CELLSIZE):
arr1.append([])
arr2.append([])
for x in range(0, CELLSIZE):
arr1[-1].append([])
arr2[-1].append([])
cw = (xmax - xmin) / float(CELLSIZE)
ch = (ymax - ymin) / float(CELLSIZE)
for n in net2._nodes:
cx = (n._coord[0] - xmin) / cw
cy = (n._coord[1] - ymin) / ch
arr1[int(cy)][int(cx)].append(n)
for n in net1._nodes:
cx = (n._coord[0] - xmin) / cw
cy = (n._coord[1] - ymin) / ch
arr2[int(cy)][int(cx)].append(n)
# map
nmap1to2 = {}
nmap2to1 = {}
nodes1 = net2._nodes
nodes2 = net1._nodes
highwayNodes2 = set()
highwaySinks2 = set()
highwaySources2 = set()
urbanNodes2 = set()
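# Classify net2 nodes: an edge allowing more than 80/3.6 m/s (80 km/h) marks
# its node as a highway node; edge speeds above 99 m/s appear to be used as a
# sentinel for artificial district connectors and are therefore excluded when
# counting real incoming/outgoing edges.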
for n2 in nodes2:
noIncoming = 0
noOutgoing = 0
for e in n2._outgoing:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
highwayNodes2.add(n2)
if e.getSpeed() < 99:
noOutgoing = noOutgoing + 1
for e in n2._incoming:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
highwayNodes2.add(n2)
if e.getSpeed() < 99:
noIncoming = noIncoming + 1
if n2 in highwayNodes2:
if noOutgoing == 0:
highwaySinks2.add(n2)
if noIncoming == 0:
highwaySources2.add(n2)
else:
urbanNodes2.add(n2)
print("Found " + str(len(highwaySinks2)) + " highway sinks in net2")
cont = ""
for n in highwaySinks2:
cont = cont + n._id + ", "
print(cont)
cont = ""
print("Found " + str(len(highwaySources2)) + " highway sources in net2")
for n in highwaySources2:
cont = cont + n._id + ", "
print(cont)
fdd = open("dconns.con.xml", "w")
fdd.write("<connections>\n")
highwaySinks1 = set()
highwaySources1 = set()
origDistrictNodes = {}
nnn = {}
for n1 in nodes1:
if n1._id.find('-', 1) < 0:
continue
# if n1._id.find("38208387")<0:
# continue
un1 = None
for e in n1._outgoing:
un1 = e._to
for e in n1._incoming:
un1 = e._from
d = n1._id[:n1._id.find('-', 1)]
if d[0] == '-':
d = d[1:]
if d not in origDistrictNodes:
origDistrictNodes[d] = []
if options.verbose:
print("District: " + d)
isHighwayNode = False
isHighwaySink = False
isHighwaySource = False
noIncoming = 0
noOutgoing = 0
noInConns = 0
noOutConns = 0
for e in un1._outgoing:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
isHighwayNode = True
if e.getSpeed() < 99:
noOutgoing = noOutgoing + 1
if e.getSpeed() > 99:
noOutConns = noOutConns + 1
for e in un1._incoming:
if e.getSpeed() > 80. / 3.6 and e.getSpeed() < 99:
isHighwayNode = True
if e.getSpeed() < 99:
noIncoming = noIncoming + 1
if e.getSpeed() > 99:
noInConns = noInConns + 1
if options.verbose:
print("Check", un1._id, noOutgoing, noIncoming)
if isHighwayNode:
if noOutgoing == 0:
highwaySinks1.add(n1)
isHighwaySink = True
if noIncoming == 0:
highwaySources1.add(n1)
isHighwaySource = True
# the next is a hack for bad visum-networks
if noIncoming == 1 and noOutgoing == 1 and noInConns == 1 and noOutConns == 1:
highwaySinks1.add(n1)
isHighwaySink = True
highwaySources1.add(n1)
isHighwaySource = True
best = None
bestDist = -1
check = urbanNodes2
if n1 in highwaySinks1:
check = highwaySinks2
elif n1 in highwaySources1:
check = highwaySources2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if bestDist == -1 or bestDist > dist:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
if options.verbose:
print("a: " + d + "<->" + best._id)
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
preBest = best
best = None
bestDist = -1
check = []
if n1 in highwaySinks1 or preBest in highwaySinks2:
check = highwaySources2
elif n1 in highwaySources1 or preBest in highwaySources2:
check = highwaySinks2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if (bestDist == -1 or bestDist > dist) and n2 != preBest:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
print("b: " + d + "<->" + best._id)
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
if options.verbose:
print("Found " + str(len(highwaySinks1)) + " highway sinks in net1")
for n in highwaySinks1:
print(n._id)
print("Found " + str(len(highwaySources1)) + " highway sources in net1")
for n in highwaySources1:
print(n._id)
connectedNodesConnections = {}
for d in nmap1to2:
for n2 in nmap1to2[d]:
if n2 in connectedNodesConnections:
continue
n1i = net1.addNode("i" + n2._id, nnn[n2]._coord)
n1o = net1.addNode("o" + n2._id, nnn[n2]._coord)
haveIncoming = False
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0] != "i" and e._id[0] != "o":
haveIncoming = True
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
haveOutgoing = False
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
haveOutgoing = True
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
if haveIncoming:
e1 = net1.addEdge("o" + n2._id, n2._id, n1o._id, -2)
if haveOutgoing:
net1.addLane(e1, 20, 100.)
else:
for i in range(0, incomingLaneNo):
net1.addLane(e1, 20, 100.)
if len(n2._incoming) == 1:
fdd.write(' <connection from="' + n2._incoming[
0]._id + '" to="' + e1._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
if haveOutgoing:
if options.verbose:
print("has outgoing")
e2 = net1.addEdge("i" + n2._id, n1i._id, n2._id, -2)
if haveIncoming:
net1.addLane(e2, 20, 100.)
else:
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 20, 100.)
if len(n2._outgoing) == 1:
fdd.write(' <connection from="' + e2._id + '" to="' +
n2._outgoing[0]._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
connectedNodesConnections[n2] = [n1i, n1o]
newDistricts = {}
districtSources = {}
districtSinks = {}
mappedDistrictNodes = {}
connNodes = {}
dRemap = {}
for d in nmap1to2:
newDistricts[d] = []
if len(nmap1to2[d]) == 1:
n = nmap1to2[d][0]
if n in dRemap:
districtSources[d] = districtSources[dRemap[n]]
districtSinks[d] = districtSinks[dRemap[n]]
newDistricts[d] = []
newDistricts[d].append(n._id)
continue
else:
dRemap[n] = d
[ni, no] = connectedNodesConnections[n]
if len(ni._outgoing) > 0:
districtSources[d] = ni._outgoing[0]._id
if len(no._incoming) > 0:
districtSinks[d] = no._incoming[0]._id
fdd.write(' <connection from="' + no._incoming[0]._id + '"/>\n')
else:
incomingLaneNoG = 0
outgoingLaneNoG = 0
for n in nmap1to2[d]:
for e in n._incoming:
if e._id[0] != "i" and e._id[0] != "o":
incomingLaneNoG = incomingLaneNoG + e.getLaneNumber()
for e in n._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
outgoingLaneNoG = outgoingLaneNoG + e.getLaneNumber()
p1 = [0, 0]
p11 = [0, 0]
p12 = [0, 0]
p2 = [0, 0]
for n in nmap1to2[d]:
p1[0] = p1[0] + n._coord[0]
p1[1] = p1[1] + n._coord[1]
p2[0] = p2[0] + nnn[n]._coord[0]
p2[1] = p2[1] + nnn[n]._coord[1]
p2[0] = (p1[0] + p2[0]) / float(len(origDistrictNodes[d]) * 2)
p2[1] = (p1[1] + p2[1]) / float(len(origDistrictNodes[d]) * 2)
dn2i = net1.addNode("cci" + d, p2)
dn2o = net1.addNode("cci" + d, p2)
p11[0] = p1[0] / float(len(origDistrictNodes[d]))
p11[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1o = net1.addNode("co" + d, p11)
e1 = net1.addEdge("co" + d, dn1o._id, dn2o._id, -2)
for i in range(0, incomingLaneNoG):
net1.addLane(e1, 22, 100.)
districtSinks[d] = e1._id
p12[0] = p1[0] / float(len(origDistrictNodes[d]))
p12[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1i = net1.addNode("ci" + d, p12)
e2 = net1.addEdge("ci" + d, dn2i._id, dn1i._id, -2)
for i in range(0, outgoingLaneNoG):
net1.addLane(e2, 21, 100.)
districtSources[d] = e2._id
runningOutLaneNumber = 0
runningInLaneNumber = 0
for n2 in nmap1to2[d]:
[ni, no] = connectedNodesConnections[n2]
print("In: " + ni._id + " " + str(len(ni._incoming)) +
" " + str(len(ni._outgoing)))
print("Out: " + no._id + " " + str(len(no._incoming)) +
" " + str(len(no._outgoing)))
if len(no._incoming) > 0:
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0] != "i" and e._id[0] != "o":
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
e1 = net1.addEdge("o" + d + "#" + n2._id, no._id, dn1o._id, -2)
for i in range(0, incomingLaneNo):
net1.addLane(e1, 19, 100.)
fdd.write(' <connection from="' + "o" + d + "#" + n2._id + '" to="' + dn1o._outgoing[
0]._id + '" lane="' + str(i) + ':' + str(runningOutLaneNumber) + '"/>\n')
runningOutLaneNumber = runningOutLaneNumber + 1
fdd.write(
' <connection from="' + dn1o._outgoing[0]._id + '"/>\n')
if incomingLaneNo == 0:
net1.addLane(e1, 19, 100.)
runningOutLaneNumber = runningOutLaneNumber + 1
if len(ni._outgoing) > 0:
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0] != "i" and e._id[0] != "o":
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
e2 = net1.addEdge("i" + d + "#" + n2._id, dn1i._id, ni._id, -2)
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 18, 100.)
fdd.write(' <connection from="' + dn1i._incoming[
0]._id + '" to="' + "i" + d + "#" + n2._id + '" lane="' + str(runningInLaneNumber) + ':' + str(i) + '"/>\n')
runningInLaneNumber = runningInLaneNumber + 1
if outgoingLaneNo == 0:
net1.addLane(e2, 18, 100.)
runningInLaneNumber = runningInLaneNumber + 1
fd = open("districts.xml", "w")
fd.write("<tazs>\n")
for d in newDistricts:
fd.write(' <taz id="' + d + '">\n')
if d in districtSources:
fd.write(
' <tazSource id="' + districtSources[d] + '" weight="1"/>\n')
if d in districtSinks:
fd.write(
' <tazSink id="' + districtSinks[d] + '" weight="1"/>\n')
fd.write(' </taz>\n')
fd.write("</tazs>\n")
fd.close()
def writeNode(fd, node):
fd.write(" <node id=\"" + node._id + "\" x=\"" +
str(node._coord[0]) + "\" y=\"" + str(node._coord[1]) + "\"/>\n")
def writeEdge(fd, edge, withGeom=True):
fd.write(" <edge id=\"" + edge._id + "\" fromNode=\"" +
edge._from._id + "\" toNode=\"" + edge._to._id)
fd.write("\" speed=\"" + str(edge._speed))
fd.write("\" priority=\"" + str(edge._priority))
if withGeom:
fd.write("\" spreadType=\"center")
fd.write("\" numLanes=\"" + str(len(edge._lanes)) + "\"")
shape = edge.getShape()
if withGeom:
fd.write(" shape=\"")
for i, c in enumerate(shape):
if i != 0:
fd.write(" ")
fd.write(str(c[0]) + "," + str(c[1]))
fd.write("\"")
fd.write("/>\n")
def writeNodes(net):
fd = open("nodes.xml", "w")
fd.write("<nodes>\n")
for node in net._nodes:
writeNode(fd, node)
fd.write("</nodes>\n")
fd.close()
def writeEdges(net):
fd = open("edges.xml", "w")
fd.write("<edges>\n")
for edge in net._edges:
if edge._id.find("#") > 0 or edge._id.find("c") >= 0 or edge._id.find("i") >= 0:
writeEdge(fd, edge, False)
else:
writeEdge(fd, edge)
fd.write("</edges>\n")
fd.close()
fdd.write("</connections>\n")
writeNodes(net1)
writeEdges(net1)
| [((52, 12, 52, 26), 'optparse.OptionParser', 'OptionParser', ({}, {}), '()', False, 'from optparse import OptionParser\n'), ((39, 11, 39, 39), 'math.sqrt', 'math.sqrt', ({(39, 21, 39, 38): '(xd * xd + yd * yd)'}, {}), '(xd * xd + yd * yd)', False, 'import math\n'), ((31, 48, 31, 73), 'os.path.abspath', 'os.path.abspath', ({(31, 64, 31, 72): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
bkpifc/BKPMediaDetector | BKPMediaDetector.py | 51858b45e218e0c4b5ed4d6aac6d751e029d850e | #!/usr/bin/env python3
######
# General Detector
# 06.12.2018 / Last Update: 20.05.2021
# LRB
######
import numpy as np
import os
import sys
import tensorflow as tf
import hashlib
import cv2
import magic
import PySimpleGUI as sg
import csv
import imagehash
import face_recognition
import subprocess
from itertools import groupby
from distutils.version import StrictVersion
from PIL import Image
from datetime import datetime
from time import strftime
from time import gmtime
from multiprocessing import Pool
from Models.Face import detect_face
from pathlib import Path
from openvino.inference_engine import IENetwork, IECore
from AudioAnalysis import audioAnalysis
######
# Worker function to check the input provided via the GUI
#######
def validateInput(gui_input):
error = False
#Validate input
# for element in gui_input[1][0:7]:
# if element == '' or []:
# error = True
if gui_input[0] == "Cancel" or len(gui_input[1][8]) == 0:
error = True
if bool(gui_input[1][5]) == True and gui_input[1][12] == "":
error = True
if error == True:
sg.Popup('You have not populated all required fields. Aborting!', title='Error', button_color=('black', 'red'), background_color=('grey'))
exit()
######
# Worker function to update the progress bar
######
def updateProgressMeter(step, customText):
if sg.OneLineProgressMeter('BKP Media Detector', step, 12, 'key', customText, orientation='h', size=(50, 25)) == False:
exit()
######
# Worker function to prepare and reshape the input images into a Numpy array
# and to calculate the MD5 hashes of them.
######
def load_image_into_numpy_array(image_path):
try:
image_path = str(image_path)
# Open, measure and convert image to RGB channels
image = Image.open(image_path)
(im_width, im_height) = image.size
if int(im_width) < 34 or int(im_height) < 34:
logfile.write("Insufficient file dimensions: " + str(image_path) + "\n")
return None
if int(im_width) > 4512 or int(im_height) > 3008:
maxheight = int(3008)
maxwidth = int(4512)
resize_ratio = min(maxwidth/im_width, maxheight/im_height)
im_width = int(im_width * resize_ratio)
im_height = int(im_height * resize_ratio)
image = image.resize((im_width, im_height))
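# (both dimensions are scaled by the same ratio, so downscaling to the
# 4512x3008 bound preserves the original aspect ratio)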
image = image.convert('RGB')
np_array = np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
image.close()
# Hash the image in byte-chunks of 4096
hash_md5 = hashlib.md5()
with open(image_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
f.close()
hashvalue = hash_md5.hexdigest()
return image_path, hashvalue, np_array
#Throw errors to stdout
except (IOError, OSError):
magictype = str(magic.from_file((image_path), mime=True))
# If image file cannot be read, check if it is a video
if magictype[:5] == 'video': #or magictype[12:17] == 'octet':
# If so, return a video flag instead of numpy array
flag = "VIDEO"
elif magictype[:5] == 'audio':
flag = "AUDIO"
elif magictype[12:17] == 'octet':
flag = "OCTET"
else:
image_path = "Could not open file: " + str(image_path) + " (" + str(magictype) + ")\n"
flag = "ERROR"
return image_path, flag
except:
magictype = str(magic.from_file((image_path), mime=True))
logfile.write("General error with file: " + str(image_path) + " (" + str(magictype) + ")\n")
def check_video_orientation(image_path):
# Function to check video rotation with ffprobe and return corresponding CV2 rotation code
try:
cmnd = ['ffprobe', '-loglevel', 'error', '-select_streams', 'v:0', '-show_entries', 'stream_tags=rotate', '-of',
'default=nw=1:nk=1', image_path]
p = subprocess.Popen(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
orientation = out.decode('utf-8')
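# Map the rotation metadata to OpenCV rotate codes: 0 = cv2.ROTATE_90_CLOCKWISE,
# 1 = cv2.ROTATE_180, 2 = cv2.ROTATE_90_COUNTERCLOCKWISE; 3 is used here as a
# sentinel meaning "no rotation needed".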
if orientation == '':
rotation = 3
elif int(orientation) == 180:
rotation = 1
elif int(orientation) == 90:
rotation = 0
else:
rotation = 2
return rotation
except:
logfile.write("Cannot determine video rotation: " + str(image_path) + "\n")
######
# Worker function to prepare and reshape the input videos to a Numpy array
# and to calculate the MD5 hashes of them.
# The function analyzes as much frames as indicated in the variable "frames_per_second" (Default = 0.5)
######
def load_video_into_numpy_array(image_path):
videoframes = []
old_hash = None
# Loading the video via the OpenCV framework
try:
rotation = check_video_orientation(image_path)
vidcap = cv2.VideoCapture(image_path)
im_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
im_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Switch height/width if video is to be rotated 90/270 degrees
if rotation == 0 or rotation == 2:
im_width_new = im_height
im_height_new = im_width
im_width = im_width_new
im_height = im_height_new
# Calculating frames per second, total frame count and analyze rate
fps = int(vidcap.get(cv2.CAP_PROP_FPS))
framecount = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
analyze_rate = int(framecount / fps * frames_per_second)
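# Example: a 60 second video at 30 fps (1800 frames) with frames_per_second = 0.5
# yields analyze_rate = 1800 / 30 * 0.5 = 30 frames to extract.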
if 0 < analyze_rate < max_frames_per_video:
int(analyze_rate)
elif analyze_rate >= int(max_frames_per_video):
analyze_rate = int(max_frames_per_video) #Limiting maximum frames per video
else:
videoerror = 'Unable to extract frames from video: ' + str(image_path) + '\n'
return videoerror
# Hashing the video once
hash_md5 = hashlib.md5()
with open(image_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
hashvalue = hash_md5.hexdigest()
# Extracting the frames from the video
for percentile in range(0, analyze_rate):
vidcap.set(cv2.CAP_PROP_POS_FRAMES, (framecount / analyze_rate) * percentile)
success, extracted_frame = vidcap.read()
if rotation != 3:
extracted_frame = cv2.rotate(extracted_frame, rotation)
extracted_frame = cv2.cvtColor(extracted_frame, cv2.COLOR_BGR2RGB)
timecode = ((framecount / analyze_rate) * percentile) / fps
timecode = str(strftime("%H:%M:%S", gmtime(timecode)))
# And reshape them into a numpy array
np_array = np.array(extracted_frame).reshape(
(im_height, im_width, 3)).astype(np.uint8)
if video_sensitivity > 0:
# Compare the frame with the previous one for similarity, and drop if similar
frame_to_check = Image.fromarray(np_array)
new_hash = imagehash.phash(frame_to_check)
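# Subtracting two imagehash values gives the Hamming distance between the
# perceptual hashes; frames whose distance to the previously kept frame does
# not exceed video_sensitivity are treated as near-duplicates and skipped.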
if old_hash is None or (new_hash - old_hash > video_sensitivity):
cluster = str(image_path + ";" + str(timecode)), hashvalue, np_array
videoframes.append(cluster)
old_hash = new_hash
else:
cluster = str(image_path + ";" + str(timecode)), hashvalue, np_array
videoframes.append(cluster)
vidcap.release()
return videoframes
except cv2.error:
videoerror = 'Could not process video: ' + str(image_path) + '\n'
return videoerror
except:
videoerror = 'General error processing video: ' + str(image_path) + '\n'
return videoerror
######
# Detection within loaded images with Tensorflow framework
# Creation of output file with hashes, detection scores and class
######
def run_inference_for_multiple_images(image_paths, images, hashvalues):
# Open the results file again
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
for y in range(0, len(graphlist)):
# Create TF Session with loaded graph
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
logfile.write("*" + str(datetime.now()) + ": \tStarting detection with model " + str(y + 1) + " of " + str(len(graphlist)) + "*\n")
# Update progress indicator
updateProgressMeter(7 + y, 'Detecting with model {}'.format(graphlist[y]))
# Load the respective detetion graph from file
with tf.gfile.GFile(graphlist[y], 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Create TF session
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_scores', 'detection_classes'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Setting the detection limit of the different models.
if "ISLogo" not in graphlist[y]:
detectionlimit = 0.5
else:
detectionlimit = 0.90
# Loading the label map of the corresponding graph
category_index = indexlist[y]
# Conduct actual detection within single image
for index, image in enumerate(images):
updateProgressMeter(7 + y, str(graphlist[y]) + '\nFile ' + str(index) + ' of ' + str(len(images)))
try:
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_scores'] = output_dict['detection_scores'][0]
detectionhit = output_dict['num_detections']
output_dict['detection_classes'] = output_dict['detection_classes'][0]
hashvalue = hashvalues[index]
image_path = image_paths[index]
# Validate against detection limit (default: 65%) and write hash/score if above
for j in range(detectionhit):
score = output_dict['detection_scores'][j]
category = category_index[output_dict['detection_classes'][j]]
# Validate against the preconfigured minimum detection assurance and write to result file
if (score >= detectionlimit):
scorestring = str(score)
if REPORT_FORMAT[0] == 'Nuix':
line = ",".join([category['name'], "md5:" + hashvalue])
else:
line = ",".join([Path(image_path).name, hashvalue, scorestring, category['name']])
detectionresults.write(line + "\n")
except tf.errors.InvalidArgumentError:
logfile.write("Unable to process file dimensions of file with hash: \t" + str(hashvalue) + "\n")
logfile.write("*" + str(datetime.now()) + ": \tFinished detection with model " + str(y + 1) + "*\n")
detectionresults.flush()
detectionresults.close()
######
# Detect and count faces in loaded images
# Prepare and call age/gender detection once done
######
def faceDetection(image_paths, images, hashvalues):
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
# Updating progress bar and logfile
updateProgressMeter(10, 'Detecting with Face/Age/Gender Detector')
logfile.write("*" + str(datetime.now()) + ": \tStarting detection with face/age/gender detection model*\n")
# Applying constants as defined in Facenet
minsize = 20
threshold = [0.6, 0.7, 0.7]
factor = 0.709
# Creating different TF Session
with tf.Session() as sess:
# read pnet, rnet, onet models from Models/Face directory
facemodel_path = Path('Models/Face')
pnet, rnet, onet = detect_face.create_mtcnn(sess, str(facemodel_path))
# Helperlists for age/gender detection
facelist = []
imagelist = []
# Inference for all images
for index, image in enumerate(images):
updateProgressMeter(10, 'Detecting with Face/Age/Gender Detector' + '\nFile ' + str(index) + ' of ' + str(len(images)))
try:
bounding_boxes, _ = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
# If a face was detected, go on
if nrof_faces > 0:
detectedFaces = bounding_boxes[:, 0:4]
detectedFacesArray = []
img_size = np.asarray(image.shape)[0:2]
if nrof_faces > 1:
for single_face in range(nrof_faces):
detectedFacesArray.append(np.squeeze(detectedFaces[single_face]))
else:
detectedFacesArray.append(np.squeeze(detectedFaces))
# Crop the detected face and add it to the list to conduct age/gender identification
for x, detectedFaces in enumerate(detectedFacesArray):
detectedFaces = np.squeeze(detectedFaces)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(detectedFaces[0], 0)
bb[1] = np.maximum(detectedFaces[1], 0)
bb[2] = np.minimum(detectedFaces[2], img_size[1])
bb[3] = np.minimum(detectedFaces[3], img_size[0])
cropped_Face = image[bb[1]:bb[3], bb[0]:bb[2], :]
facelist.append(cropped_Face)
imagelist.append(index)
# Write the results of the face detection into the resultsfile
if not len(bounding_boxes) == 0:
hashvalue = hashvalues[index]
number_of_faces = len(bounding_boxes)
if REPORT_FORMAT[0] == 'Nuix':
line = "Face,md5:" + hashvalue
else:
line = str(Path(image_paths[index]).name) + "," + str(hashvalue) + ",FACES," + str(
number_of_faces) + "Faces"
detectionresults.write(line + "\n")
except tf.errors.InvalidArgumentError:
                errorcount += 1
                logfile.write("Unable to detect faces in file with hash: \t" + str(hashvalues[index]) + "\n")
# Conduct age/gender recognition based on the list of detected & cropped faces
if len(facelist) != 0:
age_gender_detection(imagelist, facelist, hashvalues, image_paths)
logfile.write("*" + str(datetime.now()) + ": \tFinished detection with face/age/gender detection model*\n")
detectionresults.flush()
detectionresults.close()
######
# Detection with the OPEN VINO Framework
# Evaluate Age & Gender based on input faces
######
def age_gender_detection(imagelist, facelist, hashvalues, image_paths):
# Acquire the age-gender detection model
model_path = Path('Models/OpenVINO/age-gender')
model_xml = str(model_path / 'model.xml')
model_bin = str(model_path / 'model.bin')
# Reopen the results file
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
# Plugin initialization for specified device and load extensions library if specified
ie = IECore()
# Read IR
net = IENetwork(model=model_xml, weights=model_bin)
input_blob = next(iter(net.inputs))
net.batch_size = len(facelist)
# Read and pre-process input images
n, c, h, w = net.inputs[input_blob].shape
images = np.ndarray(shape=(n, c, h, w))
# Loading model to the plugin
exec_net = ie.load_network(network=net, device_name='CPU')
# Resize and reshape input faces
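    # The age/gender network expects 62x62 inputs, so each face is rescaled with its aspect ratio preserved and padded to 62x62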
for i in range(n):
image = facelist[i]
if image.shape[:-1] != (62, 62):
h, w = image.shape[:2]
# interpolation method
if h > 62 or w > 62: # shrinking image
interp = cv2.INTER_AREA
else: # stretching image
interp = cv2.INTER_CUBIC
# aspect ratio of image
aspect = w / h
# compute scaling and pad sizing
if aspect > 1: # horizontal image
new_w = 62
new_h = np.round(new_w / aspect).astype(int)
pad_vert = (62 - new_h) / 2
pad_top, pad_bot = np.floor(pad_vert).astype(int), np.ceil(pad_vert).astype(int)
pad_left, pad_right = 0, 0
elif aspect < 1: # vertical image
new_h = 62
new_w = np.round(new_h * aspect).astype(int)
pad_horz = (62 - new_w) / 2
pad_left, pad_right = np.floor(pad_horz).astype(int), np.ceil(pad_horz).astype(int)
pad_top, pad_bot = 0, 0
else: # square image
new_h, new_w = 62, 62
pad_left, pad_right, pad_top, pad_bot = 0, 0, 0, 0
# set pad color
padColor = 0
            if len(image.shape) == 3 and not isinstance(padColor, (
list, tuple, np.ndarray)): # color image but only one color provided
padColor = [padColor] * 3
# scale and pad
scaled_img = cv2.resize(image, (new_w, new_h), interpolation=interp)
scaled_img = cv2.cvtColor(scaled_img, cv2.COLOR_BGR2RGB)
scaled_img = cv2.copyMakeBorder(scaled_img, pad_top, pad_bot, pad_left, pad_right,
borderType=cv2.BORDER_CONSTANT, value=padColor)
image = scaled_img.transpose((2, 0, 1)) # Change data layout from HWC to CHW
images[i] = image
# Conduct inference
res = exec_net.infer(inputs={input_blob: images})
# Process inference results
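    # 'age_conv3' returns the age normalized to [0, 1] (scaled by 100 below); 'prob' returns [female, male] probabilities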
for y in range(len(facelist)):
probable_age = int(np.squeeze(res['age_conv3'][y]) * 100)
if np.squeeze(res['prob'][y][0]) > 0.5:
gender = "Female"
else:
gender = "Male"
age_gender_combo = str(probable_age) + str(gender)
# Write inference results to resultsfile
hashvalue = hashvalues[imagelist[y]]
if REPORT_FORMAT[0] == 'Nuix':
line = str(age_gender_combo) + ",md5:" + hashvalue
else:
line = str(Path(image_paths[imagelist[y]]).name) + "," + str(hashvalue) + ",AGE-GENDER," + str(
age_gender_combo)
detectionresults.write(line + "\n")
######
# Detection with the OPEN VINO Framework
# Creation of output file with hashes, detection scores and class
######
def run_inference_openvino(image_paths, images, hashvalue):
# Update progress meter and reopen results file
updateProgressMeter(6, 'Detecting with OpenVINO Object Detector')
logfile.write("*" + str(datetime.now()) + ": \tStarting detection with OpenVINO object detection model*\n")
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
# Fetch paths for openvino model
model_path = Path('Models/OpenVINO/vgg19')
model_xml = str(model_path / 'model.xml')
model_bin = str(model_path / 'model.bin')
model_labels = str(model_path / 'model.labels')
temp_bilder = images
# Plugin initialization for specified device and load extensions library if specified
ie = IECore()
# Read IR
net = IENetwork(model=model_xml, weights=model_bin)
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
net.batch_size = 4000
# Read and pre-process input images
n, c, h, w = net.inputs[input_blob].shape
images = np.ndarray(shape=(n, c, h, w))
# Loading model to the plugin
exec_net = ie.load_network(network=net, device_name='CPU')
# Create batches to prevent RAM overload
batches = tuple(temp_bilder[x:x + net.batch_size] for x in range(0, len(temp_bilder), net.batch_size))
# Start sync inference
for batch in batches:
for index, temp_pic in enumerate(batch):
temp_pic = cv2.resize(temp_pic, (w, h))
temp_pic = temp_pic.transpose((2, 0, 1))
images[index] = temp_pic
res = exec_net.infer(inputs={input_blob: images})
# Processing output blob
res = res[out_blob]
# Prepare label file
with open(model_labels, 'r') as f:
labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
# Clean inference results and write them to resultsfile
for i, probs in enumerate(res):
probs = np.squeeze(probs)
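            # Keep the indices of the three highest-scoring classes, best first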
top_ind = np.argsort(probs)[-3:][::-1]
for id in top_ind:
if probs[id] >= 0.3:
# det_label = labels_map[id] if labels_map else "{}".format(id)
det_label = labels_map[id].split(sep=' ', maxsplit=1)[1]
if REPORT_FORMAT[0] == 'Nuix':
line = ",".join([det_label, "md5:" + hashvalue])
else:
line = ",".join([Path(image_paths[i]).name, hashvalue[i], str(probs[id]), str(det_label)])
detectionresults.write(line + "\n")
logfile.write("*" + str(datetime.now()) + ": \tFinished detection with OpenVINO object detection model*\n")
######
# Worker function to load and encode known faces and to compare them against
# the provided input material
######
def faceRecognition(known_faces_path, image_paths, images, hashvalues):
# Update progress bar
updateProgressMeter(5, 'Conducting Face Recognition')
known_face_counter = 0
# Open the results file
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
OutputPictureFolder = PATH_TO_RESULTS / 'DetectedFaces'
if not OutputPictureFolder.exists(): os.mkdir(str(OutputPictureFolder))
# Initiate array to store known faces
known_face_encodings = []
known_face_names = []
known_faces = Path.iterdir(Path(known_faces_path))
# Create encodings and store them with names
for known_face in known_faces:
known_person_image = face_recognition.load_image_file(known_face)
known_face_encodings.extend(face_recognition.face_encodings(known_person_image))
known_face_names.append(Path(known_face).stem)
logfile.write("*" + str(datetime.now()) + ": \tStarting face recognition with " + str(len(known_face_names)) + " known faces*\n")
# Load images, detect faces, encode and compare them to the known faces
for index, image_to_detect in enumerate(images):
hashvalue = hashvalues[index]
image_path = image_paths[index]
updateProgressMeter(5, 'Face Reco Image ' + str(index) + ' of ' + str(len(images)))
# Use GPU based model to detect & encode
face_locations = face_recognition.face_locations(image_to_detect, model="cnn")
face_encodings = face_recognition.face_encodings(image_to_detect, face_locations)
# Loop through each face in this frame of video
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=facereq_tolerance)
name = "Unknown"
# Check the face distance and get best match
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
# If there is a match, write it to the output file
if name != "Unknown":
known_face_counter += 1
if REPORT_FORMAT[0] == 'Nuix':
line = ",".join([name, "md5:" + hashvalue])
else:
line = ",".join([Path(image_path).name, hashvalue, "FACE-Match", name])
detectionresults.write(line + "\n")
if output_detFaces:
# Export detected face with bounding box
cv2.rectangle(image_to_detect, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(image_to_detect, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(image_to_detect, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
savePath = str(OutputPictureFolder / str(Path(image_path).name)) + '.jpg'
detectedFace = Image.fromarray(image_to_detect)
detectedFace.save(savePath)
logfile.write("*" + str(datetime.now()) + ": \tFace Recognition completed.*\n")
detectionresults.flush()
detectionresults.close()
# Return amount of detected known faces
return known_face_counter
######
# Worker function to conduct speech detection in audio files
# for all audio files detected
######
def audioSpeechDetection(audiolist):
logfile.write("*" + str(datetime.now()) + ": \tStarting audio speech detection*\n")
updateProgressMeter(11, 'Processing Audio Files')
audiocounter = 0
# Open the results file
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'a')
pool = Pool(maxtasksperchild=100)
result = pool.map(audioAnalysis.segmentSpeechDetection, audiolist, chunksize=10)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
    result = [x for x in result if x is not None]
for processedAudio in result:
speechPercentage, audiopath = processedAudio
# Check for the video flag
if not isinstance(speechPercentage, float):
logfile.write("Unsupported audio file: " + str(audiopath) + "\n")
else:
speechPercentage, audiopath = processedAudio
# Hashing the video once
hash_md5 = hashlib.md5()
with open(audiopath, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
hashvalue = hash_md5.hexdigest()
audiocounter += 1
            if REPORT_FORMAT[0] == 'Nuix':
                line = ",".join(["AUDIO-SPEECH", "md5:" + hashvalue]) if speechPercentage != 0.0 else None
            else:
                line = ",".join([Path(audiopath).name, hashvalue, str(speechPercentage), "AUDIO-SPEECH"])
            if line is not None:
                detectionresults.write(line + "\n")
logfile.write("*" + str(datetime.now()) + ": \tAudio speech detection completed.*\n")
detectionresults.flush()
detectionresults.close()
return audiocounter
######
# Split the report file to allow seamless integration into XWays Hash Database per category
######
def createXWaysReport():
detectionresults_path = str(PATH_TO_RESULTS / 'Detection_Results.csv')
xways_folder = PATH_TO_RESULTS / 'XWaysOutput'
if not xways_folder.exists(): os.mkdir(str(xways_folder))
for key, rows in groupby(csv.reader(open(detectionresults_path)),
lambda row: row[3]):
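        # groupby only merges consecutive rows with the same category; since every category file is opened in append mode, the split stays correct even when categories are interleaved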
# Replace special characters in categories
if str(key) != 'category':
key = str(key).replace("/","-")
key = str(key).replace(".", "")
key = str(key).replace("(", "")
key = str(key).replace(")", "")
key = key + '.txt'
detectionresults_single_path = xways_folder / key
with open(str(detectionresults_single_path), 'a') as rf:
for row in rows:
rf.write(row[1] + "\n")
rf.flush()
# Get a list of all files in results directory
resultsfiles = os.listdir(str(xways_folder))
# Prepend them with MD5 for seamless import into XWays
for file in resultsfiles:
line = "md5"
if file[-3:] == 'txt' and file != 'Logfile.txt':
with open(str(xways_folder / file), 'r+') as ff:
content = ff.read()
ff.seek(0,0)
ff.write(line.rstrip('\r\n') + '\n' + content)
######
#
# Main program function
# First initiates required parameters and variables, then loads the GUI
# After which the image and video load functions are triggered based on the input parameters
# Finally, the detection is executed and results written to the place requested
#
######
# Prevent execution when externally called
if __name__ == '__main__':
######
# Collecting parameters via GUI
######
sg.ChangeLookAndFeel('Dark')
layout = [[sg.Text('General Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('Please specify the folder holding the media data:')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/TestBilder', button_color=('black', 'grey'))], #Path.home() = Initial folder
[sg.Text('Where shall I place the results?')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/TestResults', button_color=('black', 'grey'))], #Path.home()
[sg.Text('TENSORFLOW DETECTORS')],
[sg.Checkbox('Objects/Persons', size=(15, 2)),
sg.Checkbox('Actions'),
sg.Checkbox('IS Logos'),
sg.Checkbox("Face Recognition")],
[sg.Text('OPEN VINO DETECTORS')],
[sg.Checkbox('Objects-fast', size=(15, 2)),
sg.Checkbox('Faces/Age/Gender')],
[sg.Text('Output Format:'), sg.Listbox(values=('Nuix', 'XWays', 'csv'), size=(29, 3))],
[sg.Text('Video Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('# of frames to be analyzed per Minute:', size=(36, 0))],
[sg.Slider(range=(1, 120), orientation='h', size=(29, 20), default_value=30)],
[sg.Text('Max. # of frames to be analyzed per Video:', size=(36, 0))],
[sg.Slider(range=(1, 500), orientation='h', size=(29, 20), default_value=100)],
[sg.Text('Check for & discard similar frames?'),
sg.InputCombo(('Yes', 'No'), default_value='No', size=(10, 2))],
[sg.Text('Face Recognition', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('Specify folder with known faces (if FaceReq selected): ')],
[sg.Input(), sg.FolderBrowse('Browse', initial_folder='/home/b/Desktop/known', button_color=('black', 'grey'))],
[sg.Text('Specify face recognition tolerance (Default: 60%):', size=(48, 0))],
[sg.Slider(range=(0, 100), orientation='h', size=(29, 20), default_value=60)],
[sg.Checkbox('Output detected faces as jpg', size=(25, 2))],
[sg.Text('Audio Settings', font=("Helvetica", 13), text_color='sea green')],
[sg.Text('AUDIO PROCESSING')],
[sg.Checkbox('Speech Detection', size=(15, 2))],
[sg.OK(button_color=('black', 'sea green')), sg.Cancel(button_color=('black', 'grey'))]]
layout_progress = [[sg.Text('Detection in progress')],
[sg.ProgressBar(12, orientation='h', size=(20, 20), key='progressbar')],
[sg.Cancel()]]
# Render the GUI
gui_input = sg.Window('BKP Media Detector').Layout(layout).Read()
error = False
# Validate input
validateInput(gui_input)
# Initiating progress meter
updateProgressMeter(1, 'Initializing variables & parameters...')
startTime = datetime.now()
# Variable to determine minimum GPU Processor requirement & to disable TF log output
# os.environ['TF_MIN_GPU_MULTIPROCESSOR_COUNT'] = '5'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Validating TF version
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
# Defining multiple needed variables based on GUI input & adding TF/OpenVINO directory to path
PATH_TO_INPUT = Path(gui_input[1][0])
TEST_IMAGE_PATHS = Path.iterdir(PATH_TO_INPUT)
number_of_input = 0
for elements in Path.iterdir(PATH_TO_INPUT):
number_of_input += 1
PATH_TO_RESULTS = Path(gui_input[1][1])
PATH_TO_OBJECT_DETECTION_DIR = '/home/b/Programs/tensorflow/models/research' # PLACEHOLDER-tobereplacedWithPathtoDirectory
sys.path.append(PATH_TO_OBJECT_DETECTION_DIR)
REPORT_FORMAT = gui_input[1][8]
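    # The GUI slider is given in frames per minute; convert it to frames per second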
frames_per_second = gui_input[1][9] / 60
max_frames_per_video = gui_input[1][10]
video_sensitivity_text = gui_input[1][11]
KNOWN_FACES_PATH = gui_input[1][12]
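    # The GUI slider gives a percentage; face_recognition expects a tolerance between 0 and 1 (lower = stricter)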
facereq_tolerance = int(gui_input[1][13])/100
output_detFaces = gui_input[1][14]
if video_sensitivity_text == "Yes":
video_sensitivity = 20
else:
video_sensitivity = 0
# Check which models to apply and load their corresponding label maps
from object_detection.utils import label_map_util
graphlist = []
indexlist = []
MODEL1 = bool(gui_input[1][2])
if MODEL1:
OPEN_IMAGES_GRAPH = str(Path('Models/OpenImages/openimages.pb'))
OPEN_IMAGES_LABELS = str(OPEN_IMAGES_GRAPH)[:-3] + '.pbtxt'
OPEN_IMAGES_INDEX = label_map_util.create_category_index_from_labelmap(OPEN_IMAGES_LABELS)
graphlist.append(OPEN_IMAGES_GRAPH)
indexlist.append(OPEN_IMAGES_INDEX)
MODEL2 = bool(gui_input[1][3])
if MODEL2:
AVA_GRAPH = str(Path('Models/AVA/ava.pb'))
AVA_LABELS = str(AVA_GRAPH)[:-3] + '.pbtxt'
AVA_INDEX = label_map_util.create_category_index_from_labelmap(AVA_LABELS)
graphlist.append(AVA_GRAPH)
indexlist.append(AVA_INDEX)
MODEL3 = bool(gui_input[1][4])
if MODEL3:
SPECIAL_DETECTOR_GRAPH = str(Path('Models/ISLogos/islogos.pb'))
SPECIAL_DETECTOR_LABELS = str(SPECIAL_DETECTOR_GRAPH)[:-3] + '.pbtxt'
SPECIAL_DETECTOR_INDEX = label_map_util.create_category_index_from_labelmap(SPECIAL_DETECTOR_LABELS)
graphlist.append(SPECIAL_DETECTOR_GRAPH)
indexlist.append(SPECIAL_DETECTOR_INDEX)
FACE_RECOGNITION = bool(gui_input[1][5])
OPEN_VINO_vgg19 = bool(gui_input[1][6])
FACE_MODEL = bool(gui_input[1][7])
AUDIO_SPEECH_DETECTION = bool(gui_input[1][15])
# Update the progress indicator
updateProgressMeter(2, 'Process started. Loading ' + str(number_of_input) + ' media files...')
# Create logfile
logfile = open(str(PATH_TO_RESULTS / 'Logfile.txt'), 'w')
logfile.write('***DETECTION LOG***\n')
logfile.write("*" + str(datetime.now()) + ': \tProcess started. Loading images...*\n')
# Create resultsfile
detectionresults_path = PATH_TO_RESULTS / 'Detection_Results.csv'
detectionresults = open(str(detectionresults_path), 'w')
if REPORT_FORMAT[0] == 'Nuix':
detectionresults.write("tag,searchterm\n")
else:
detectionresults.write("name,hash,score,category\n")
detectionresults.flush()
detectionresults.close()
# Initiate needed variables
vidlist = []
audiolist = []
final_images = []
errors = []
# Multiprocess the image load function on all CPU cores available
pool = Pool(maxtasksperchild=100)
processed_images = pool.map(load_image_into_numpy_array, TEST_IMAGE_PATHS, chunksize=10)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
# Clean the result for None types (where image conversion failed)
    processed_images = [x for x in processed_images if x is not None]
# Check for the different flags set by mimetype
for processed_image in processed_images:
if str(processed_image[1]) == "VIDEO":
# If present, populate the video list
vidlist.append(processed_image[0])
elif str(processed_image[1]) == "AUDIO":
audiolist.append(processed_image[0])
elif str(processed_image[1]) == "OCTET":
if processed_image[0][-3:] in ["mp4", "mov", "mpg", "avi", "exo", "mkv", "m4v", "ebm"]:
vidlist.append(processed_image[0])
else:
audiolist.append(processed_image[0])
elif str(processed_image[1]) == "ERROR":
errors.append(processed_image[0])
else:
# If not, put it to the final images list
final_images.append(processed_image)
for error in errors:
logfile.write(error)
logfile.flush()
# Count the number of images before adding the videoframes
number_of_images = len(final_images)
# Update the progress indicator
updateProgressMeter(3, 'Loading ' + str(len(vidlist)) + ' Videos...')
# Multiprocess the video load function on all CPU cores available
pool = Pool(maxtasksperchild=10)
videoframes = pool.map(load_video_into_numpy_array, vidlist, chunksize=2)
pool.close()
# Synchronize after completion
pool.join()
pool.terminate()
number_of_videos = 0
# Clean the result for None types (where video conversion failed)
for video in videoframes:
if type(video) is str:
errors.append(video)
if type(video) is list:
final_images.extend(video)
number_of_videos += 1
for error in errors:
logfile.write(error)
logfile.flush()
# Split the result from the loading function into hashes and image arrays
if len(final_images) != 0:
image_path, hashvalues, image_nps = zip(*final_images)
# Update the progress indicator & logfile
updateProgressMeter(4, 'Starting detection of ' + str(len(final_images)) + ' media files')
logfile.write("*" + str(datetime.now()) + ": \tLoading completed. Detecting...*\n")
# Conduct Face Recognition if needed
if FACE_RECOGNITION:
known_face_counter = faceRecognition(KNOWN_FACES_PATH, image_path, image_nps, hashvalues)
# Conduct OpenVino VGG19 Model if needed
if OPEN_VINO_vgg19:
run_inference_openvino(image_path, image_nps, hashvalues)
# Execute all other detection models
if len(final_images) != 0:
run_inference_for_multiple_images(image_path, image_nps, hashvalues)
# Conduct face/age/gender detection
if FACE_MODEL:
faceDetection(image_path, image_nps, hashvalues)
if AUDIO_SPEECH_DETECTION:
audiofiles_processed = audioSpeechDetection(audiolist)
else:
audiofiles_processed = 0
# Check whether an Xways report needs to be created
if REPORT_FORMAT[0] == 'XWays':
createXWaysReport()
# Write process statistics to logfile
logfile.write("*Results:\t\t\t" + str(PATH_TO_RESULTS / 'Detection_Results.csv*\n'))
logfile.write("*Total Amount of Files:\t\t" + str(number_of_input) + " (of which " + str(number_of_images + number_of_videos + audiofiles_processed) + " were processed.)*\n")
logfile.write("*Processed Images:\t\t" + str(number_of_images) + "*\n")
logfile.write("*Processed Videos: \t\t" + str(number_of_videos) + " (analyzed " + str(frames_per_second * 60) + " frames per minute, up to max. 500) with the check for content-based duplicates set to " + video_sensitivity_text + "\n")
logfile.write("*Processed Audio Files:\t\t" + str(audiofiles_processed) + "*\n")
logfile.write("*Applied models:\n")
for y in range(0, len(graphlist)): logfile.write("\t\t\t\t" + graphlist[y] + "\n")
if OPEN_VINO_vgg19: logfile.write("\t\t\t\tOpenVINO Object Detector\n")
if FACE_MODEL: logfile.write("\t\t\t\tFace-Age-Gender Detector\n")
if FACE_RECOGNITION: logfile.write("\t\t\t\tFace Recognition (Known faces detected: " + str(known_face_counter) + ")\n")
logfile.write("*Processing time:\t\t" + str(datetime.now() - startTime) + "*\n")
logfile.write("*Time per processed file:\t" + str((datetime.now() - startTime) / (number_of_images + number_of_videos + audiofiles_processed)) + "*\n")
logfile.flush()
logfile.close()
# Update progress indicator
sg.OneLineProgressMeter('BKP Media Detector', 12, 12, 'key', 'Detection finished',orientation='h',size=(100, 10))
# Deliver final success pop up to user
sg.Popup('The detection was successful',
'The results are placed here:',
'Path: "{}"'.format(str(PATH_TO_RESULTS)))
| [((412, 17, 412, 51), 'pathlib.Path', 'Path', ({(412, 22, 412, 50): '"""Models/OpenVINO/age-gender"""'}, {}), "('Models/OpenVINO/age-gender')", False, 'from pathlib import Path\n'), ((421, 9, 421, 17), 'openvino.inference_engine.IECore', 'IECore', ({}, {}), '()', False, 'from openvino.inference_engine import IENetwork, IECore\n'), ((424, 10, 424, 55), 'openvino.inference_engine.IENetwork', 'IENetwork', (), '', False, 'from openvino.inference_engine import IENetwork, IECore\n'), ((430, 13, 430, 43), 'numpy.ndarray', 'np.ndarray', (), '', True, 'import numpy as np\n'), ((519, 17, 519, 46), 'pathlib.Path', 'Path', ({(519, 22, 519, 45): '"""Models/OpenVINO/vgg19"""'}, {}), "('Models/OpenVINO/vgg19')", False, 'from pathlib import Path\n'), ((526, 9, 526, 17), 'openvino.inference_engine.IECore', 'IECore', ({}, {}), '()', False, 'from openvino.inference_engine import IENetwork, IECore\n'), ((529, 10, 529, 55), 'openvino.inference_engine.IENetwork', 'IENetwork', (), '', False, 'from openvino.inference_engine import IENetwork, IECore\n'), ((537, 13, 537, 43), 'numpy.ndarray', 'np.ndarray', (), '', True, 'import numpy as np\n'), ((678, 11, 678, 37), 'multiprocessing.Pool', 'Pool', (), '', False, 'from multiprocessing import Pool\n'), ((778, 4, 778, 32), 'PySimpleGUI.ChangeLookAndFeel', 'sg.ChangeLookAndFeel', ({(778, 25, 778, 31): '"""Dark"""'}, {}), "('Dark')", True, 'import PySimpleGUI as sg\n'), ((825, 16, 825, 30), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((835, 20, 835, 41), 'pathlib.Path', 'Path', ({(835, 25, 835, 40): 'gui_input[1][0]'}, {}), '(gui_input[1][0])', False, 'from pathlib import Path\n'), ((836, 23, 836, 50), 'pathlib.Path.iterdir', 'Path.iterdir', ({(836, 36, 836, 49): 'PATH_TO_INPUT'}, {}), '(PATH_TO_INPUT)', False, 'from pathlib import Path\n'), ((838, 20, 838, 47), 'pathlib.Path.iterdir', 'Path.iterdir', ({(838, 33, 838, 46): 'PATH_TO_INPUT'}, {}), '(PATH_TO_INPUT)', False, 'from pathlib import Path\n'), ((840, 22, 840, 43), 'pathlib.Path', 'Path', ({(840, 27, 840, 42): 'gui_input[1][1]'}, {}), '(gui_input[1][1])', False, 'from pathlib import Path\n'), ((842, 4, 842, 49), 'sys.path.append', 'sys.path.append', ({(842, 20, 842, 48): 'PATH_TO_OBJECT_DETECTION_DIR'}, {}), '(PATH_TO_OBJECT_DETECTION_DIR)', False, 'import sys\n'), ((918, 11, 918, 37), 'multiprocessing.Pool', 'Pool', (), '', False, 'from multiprocessing import Pool\n'), ((958, 11, 958, 36), 'multiprocessing.Pool', 'Pool', (), '', False, 'from multiprocessing import Pool\n'), ((1030, 4, 1030, 117), 'PySimpleGUI.OneLineProgressMeter', 'sg.OneLineProgressMeter', (), '', True, 'import PySimpleGUI as sg\n'), ((52, 8, 52, 146), 'PySimpleGUI.Popup', 'sg.Popup', (), '', True, 'import PySimpleGUI as sg\n'), ((59, 7, 59, 113), 'PySimpleGUI.OneLineProgressMeter', 'sg.OneLineProgressMeter', (), '', True, 'import PySimpleGUI as sg\n'), ((72, 16, 72, 38), 'PIL.Image.open', 'Image.open', ({(72, 27, 72, 37): 'image_path'}, {}), '(image_path)', False, 'from PIL import Image\n'), ((93, 19, 93, 32), 'hashlib.md5', 'hashlib.md5', ({}, {}), '()', False, 'import hashlib\n'), ((129, 12, 129, 82), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((159, 17, 159, 45), 'cv2.VideoCapture', 'cv2.VideoCapture', ({(159, 34, 159, 44): 'image_path'}, {}), '(image_path)', False, 'import cv2\n'), ((184, 19, 184, 32), 'hashlib.md5', 'hashlib.md5', ({}, {}), '()', False, 'import hashlib\n'), ((242, 26, 242, 36), 'tensorflow.Graph', 'tf.Graph', ({}, {}), '()', 
True, 'import tensorflow as tf\n'), ((339, 9, 339, 21), 'tensorflow.Session', 'tf.Session', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((342, 25, 342, 44), 'pathlib.Path', 'Path', ({(342, 30, 342, 43): '"""Models/Face"""'}, {}), "('Models/Face')", False, 'from pathlib import Path\n'), ((600, 31, 600, 53), 'pathlib.Path', 'Path', ({(600, 36, 600, 52): 'known_faces_path'}, {}), '(known_faces_path)', False, 'from pathlib import Path\n'), ((604, 29, 604, 73), 'face_recognition.load_image_file', 'face_recognition.load_image_file', ({(604, 62, 604, 72): 'known_face'}, {}), '(known_face)', False, 'import face_recognition\n'), ((616, 25, 616, 86), 'face_recognition.face_locations', 'face_recognition.face_locations', (), '', False, 'import face_recognition\n'), ((617, 25, 617, 89), 'face_recognition.face_encodings', 'face_recognition.face_encodings', ({(617, 57, 617, 72): 'image_to_detect', (617, 74, 617, 88): 'face_locations'}, {}), '(image_to_detect, face_locations)', False, 'import face_recognition\n'), ((831, 7, 831, 36), 'distutils.version.StrictVersion', 'StrictVersion', ({(831, 21, 831, 35): 'tf.__version__'}, {}), '(tf.__version__)', False, 'from distutils.version import StrictVersion\n'), ((831, 39, 831, 61), 'distutils.version.StrictVersion', 'StrictVersion', ({(831, 53, 831, 60): '"""1.9.0"""'}, {}), "('1.9.0')", False, 'from distutils.version import StrictVersion\n'), ((868, 28, 868, 98), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', ({(868, 79, 868, 97): 'OPEN_IMAGES_LABELS'}, {}), '(OPEN_IMAGES_LABELS)', False, 'from object_detection.utils import label_map_util\n'), ((876, 20, 876, 82), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', ({(876, 71, 876, 81): 'AVA_LABELS'}, {}), '(AVA_LABELS)', False, 'from object_detection.utils import label_map_util\n'), ((884, 33, 884, 108), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', ({(884, 84, 884, 107): 'SPECIAL_DETECTOR_LABELS'}, {}), '(SPECIAL_DETECTOR_LABELS)', False, 'from object_detection.utils import label_map_util\n'), ((198, 30, 198, 78), 'cv2.cvtColor', 'cv2.cvtColor', ({(198, 43, 198, 58): 'extracted_frame', (198, 60, 198, 77): 'cv2.COLOR_BGR2RGB'}, {}), '(extracted_frame, cv2.COLOR_BGR2RGB)', False, 'import cv2\n'), ((244, 27, 244, 40), 'tensorflow.GraphDef', 'tf.GraphDef', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((474, 25, 474, 80), 'cv2.resize', 'cv2.resize', (), '', False, 'import cv2\n'), ((475, 25, 475, 68), 'cv2.cvtColor', 'cv2.cvtColor', ({(475, 38, 475, 48): 'scaled_img', (475, 50, 475, 67): 'cv2.COLOR_BGR2RGB'}, {}), '(scaled_img, cv2.COLOR_BGR2RGB)', False, 'import cv2\n'), ((476, 25, 477, 91), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (), '', False, 'import cv2\n'), ((488, 11, 488, 40), 'numpy.squeeze', 'np.squeeze', ({(488, 22, 488, 39): "res['prob'][y][0]"}, {}), "(res['prob'][y][0])", True, 'import numpy as np\n'), ((548, 23, 548, 51), 'cv2.resize', 'cv2.resize', ({(548, 34, 548, 42): 'temp_pic', (548, 44, 548, 50): '(w, h)'}, {}), '(temp_pic, (w, h))', False, 'import cv2\n'), ((563, 20, 563, 37), 'numpy.squeeze', 'np.squeeze', ({(563, 31, 563, 36): 'probs'}, {}), '(probs)', True, 'import numpy as np\n'), ((605, 36, 605, 87), 'face_recognition.face_encodings', 'face_recognition.face_encodings', ({(605, 68, 605, 86): 'known_person_image'}, {}), 
'(known_person_image)', False, 'import face_recognition\n'), ((623, 22, 623, 118), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (), '', False, 'import face_recognition\n'), ((627, 29, 627, 96), 'face_recognition.face_distance', 'face_recognition.face_distance', ({(627, 60, 627, 80): 'known_face_encodings', (627, 82, 627, 95): 'face_encoding'}, {}), '(known_face_encodings, face_encoding)', False, 'import face_recognition\n'), ((628, 31, 628, 56), 'numpy.argmin', 'np.argmin', ({(628, 41, 628, 55): 'face_distances'}, {}), '(face_distances)', True, 'import numpy as np\n'), ((697, 23, 697, 36), 'hashlib.md5', 'hashlib.md5', ({}, {}), '()', False, 'import hashlib\n'), ((780, 15, 780, 90), 'PySimpleGUI.Text', 'sg.Text', (), '', True, 'import PySimpleGUI as sg\n'), ((781, 15, 781, 75), 'PySimpleGUI.Text', 'sg.Text', ({(781, 23, 781, 74): '"""Please specify the folder holding the media data:"""'}, {}), "('Please specify the folder holding the media data:')", True, 'import PySimpleGUI as sg\n'), ((782, 15, 782, 25), 'PySimpleGUI.Input', 'sg.Input', ({}, {}), '()', True, 'import PySimpleGUI as sg\n'), ((782, 27, 782, 129), 'PySimpleGUI.FolderBrowse', 'sg.FolderBrowse', (), '', True, 'import PySimpleGUI as sg\n'), ((783, 15, 783, 58), 'PySimpleGUI.Text', 'sg.Text', ({(783, 23, 783, 57): '"""Where shall I place the results?"""'}, {}), "('Where shall I place the results?')", True, 'import PySimpleGUI as sg\n'), ((784, 15, 784, 25), 'PySimpleGUI.Input', 'sg.Input', ({}, {}), '()', True, 'import PySimpleGUI as sg\n'), ((784, 27, 784, 130), 'PySimpleGUI.FolderBrowse', 'sg.FolderBrowse', (), '', True, 'import PySimpleGUI as sg\n'), ((785, 15, 785, 46), 'PySimpleGUI.Text', 'sg.Text', ({(785, 23, 785, 45): '"""TENSORFLOW DETECTORS"""'}, {}), "('TENSORFLOW DETECTORS')", True, 'import PySimpleGUI as sg\n'), ((786, 15, 786, 59), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (), '', True, 'import PySimpleGUI as sg\n'), ((787, 15, 787, 37), 'PySimpleGUI.Checkbox', 'sg.Checkbox', ({(787, 27, 787, 36): '"""Actions"""'}, {}), "('Actions')", True, 'import PySimpleGUI as sg\n'), ((788, 15, 788, 38), 'PySimpleGUI.Checkbox', 'sg.Checkbox', ({(788, 27, 788, 37): '"""IS Logos"""'}, {}), "('IS Logos')", True, 'import PySimpleGUI as sg\n'), ((789, 15, 789, 46), 'PySimpleGUI.Checkbox', 'sg.Checkbox', ({(789, 27, 789, 45): '"""Face Recognition"""'}, {}), "('Face Recognition')", True, 'import PySimpleGUI as sg\n'), ((790, 15, 790, 45), 'PySimpleGUI.Text', 'sg.Text', ({(790, 23, 790, 44): '"""OPEN VINO DETECTORS"""'}, {}), "('OPEN VINO DETECTORS')", True, 'import PySimpleGUI as sg\n'), ((791, 15, 791, 56), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (), '', True, 'import PySimpleGUI as sg\n'), ((792, 15, 792, 46), 'PySimpleGUI.Checkbox', 'sg.Checkbox', ({(792, 27, 792, 45): '"""Faces/Age/Gender"""'}, {}), "('Faces/Age/Gender')", True, 'import PySimpleGUI as sg\n'), ((793, 15, 793, 40), 'PySimpleGUI.Text', 'sg.Text', ({(793, 23, 793, 39): '"""Output Format:"""'}, {}), "('Output Format:')", True, 'import PySimpleGUI as sg\n'), ((793, 42, 793, 99), 'PySimpleGUI.Listbox', 'sg.Listbox', (), '', True, 'import PySimpleGUI as sg\n'), ((794, 15, 794, 88), 'PySimpleGUI.Text', 'sg.Text', (), '', True, 'import PySimpleGUI as sg\n'), ((795, 15, 795, 78), 'PySimpleGUI.Text', 'sg.Text', (), '', True, 'import PySimpleGUI as sg\n'), ((796, 15, 796, 90), 'PySimpleGUI.Slider', 'sg.Slider', (), '', True, 'import PySimpleGUI as sg\n'), ((797, 15, 797, 82), 'PySimpleGUI.Text', 'sg.Text', (), '', True, 'import PySimpleGUI as sg\n'), ((798, 
15, 798, 91), 'PySimpleGUI.Slider', 'sg.Slider', (), '', True, 'import PySimpleGUI as sg\n'), ((799, 15, 799, 61), 'PySimpleGUI.Text', 'sg.Text', ({(799, 23, 799, 60): '"""Check for & discard similar frames?"""'}, {}), "('Check for & discard similar frames?')", True, 'import PySimpleGUI as sg\n'), ((800, 15, 800, 77), 'PySimpleGUI.InputCombo', 'sg.InputCombo', (), '', True, 'import PySimpleGUI as sg\n'), ((801, 15, 801, 90), 'PySimpleGUI.Text', 'sg.Text', (), '', True, 'import PySimpleGUI as sg\n'), ((802, 15, 802, 81), 'PySimpleGUI.Text', 'sg.Text', ({(802, 23, 802, 80): '"""Specify folder with known faces (if FaceReq selected): """'}, {}), "('Specify folder with known faces (if FaceReq selected): ')", True, 'import PySimpleGUI as sg\n'), ((803, 15, 803, 25), 'PySimpleGUI.Input', 'sg.Input', ({}, {}), '()', True, 'import PySimpleGUI as sg\n'), ((803, 27, 803, 124), 'PySimpleGUI.FolderBrowse', 'sg.FolderBrowse', (), '', True, 'import PySimpleGUI as sg\n'), ((804, 15, 804, 90), 'PySimpleGUI.Text', 'sg.Text', (), '', True, 'import PySimpleGUI as sg\n'), ((805, 15, 805, 90), 'PySimpleGUI.Slider', 'sg.Slider', (), '', True, 'import PySimpleGUI as sg\n'), ((806, 15, 806, 72), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (), '', True, 'import PySimpleGUI as sg\n'), ((807, 15, 807, 88), 'PySimpleGUI.Text', 'sg.Text', (), '', True, 'import PySimpleGUI as sg\n'), ((808, 15, 808, 42), 'PySimpleGUI.Text', 'sg.Text', ({(808, 23, 808, 41): '"""AUDIO PROCESSING"""'}, {}), "('AUDIO PROCESSING')", True, 'import PySimpleGUI as sg\n'), ((809, 15, 809, 60), 'PySimpleGUI.Checkbox', 'sg.Checkbox', (), '', True, 'import PySimpleGUI as sg\n'), ((810, 15, 810, 57), 'PySimpleGUI.OK', 'sg.OK', (), '', True, 'import PySimpleGUI as sg\n'), ((810, 59, 810, 100), 'PySimpleGUI.Cancel', 'sg.Cancel', (), '', True, 'import PySimpleGUI as sg\n'), ((812, 24, 812, 56), 'PySimpleGUI.Text', 'sg.Text', ({(812, 32, 812, 55): '"""Detection in progress"""'}, {}), "('Detection in progress')", True, 'import PySimpleGUI as sg\n'), ((813, 24, 813, 93), 'PySimpleGUI.ProgressBar', 'sg.ProgressBar', (), '', True, 'import PySimpleGUI as sg\n'), ((814, 24, 814, 35), 'PySimpleGUI.Cancel', 'sg.Cancel', ({}, {}), '()', True, 'import PySimpleGUI as sg\n'), ((866, 32, 866, 71), 'pathlib.Path', 'Path', ({(866, 37, 866, 70): '"""Models/OpenImages/openimages.pb"""'}, {}), "('Models/OpenImages/openimages.pb')", False, 'from pathlib import Path\n'), ((874, 24, 874, 49), 'pathlib.Path', 'Path', ({(874, 29, 874, 48): '"""Models/AVA/ava.pb"""'}, {}), "('Models/AVA/ava.pb')", False, 'from pathlib import Path\n'), ((882, 37, 882, 70), 'pathlib.Path', 'Path', ({(882, 42, 882, 69): '"""Models/ISLogos/islogos.pb"""'}, {}), "('Models/ISLogos/islogos.pb')", False, 'from pathlib import Path\n'), ((104, 24, 104, 64), 'magic.from_file', 'magic.from_file', (), '', False, 'import magic\n'), ((119, 24, 119, 64), 'magic.from_file', 'magic.from_file', (), '', False, 'import magic\n'), ((196, 34, 196, 71), 'cv2.rotate', 'cv2.rotate', ({(196, 45, 196, 60): 'extracted_frame', (196, 62, 196, 70): 'rotation'}, {}), '(extracted_frame, rotation)', False, 'import cv2\n'), ((208, 33, 208, 58), 'PIL.Image.fromarray', 'Image.fromarray', ({(208, 49, 208, 57): 'np_array'}, {}), '(np_array)', False, 'from PIL import Image\n'), ((209, 27, 209, 58), 'imagehash.phash', 'imagehash.phash', ({(209, 43, 209, 57): 'frame_to_check'}, {}), '(frame_to_check)', False, 'import imagehash\n'), ((251, 17, 251, 51), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', ({(251, 32, 251, 44): 'graphlist[y]', (251, 
46, 251, 50): '"""rb"""'}, {}), "(graphlist[y], 'rb')", True, 'import tensorflow as tf\n'), ((254, 16, 254, 58), 'tensorflow.import_graph_def', 'tf.import_graph_def', (), '', True, 'import tensorflow as tf\n'), ((257, 17, 257, 29), 'tensorflow.Session', 'tf.Session', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((353, 36, 353, 112), 'Models.Face.detect_face.detect_face', 'detect_face.detect_face', ({(353, 60, 353, 65): 'image', (353, 67, 353, 74): 'minsize', (353, 76, 353, 80): 'pnet', (353, 82, 353, 86): 'rnet', (353, 88, 353, 92): 'onet', (353, 94, 353, 103): 'threshold', (353, 105, 353, 111): 'factor'}, {}), '(image, minsize, pnet, rnet, onet, threshold, factor)', False, 'from Models.Face import detect_face\n'), ((487, 27, 487, 58), 'numpy.squeeze', 'np.squeeze', ({(487, 38, 487, 57): "res['age_conv3'][y]"}, {}), "(res['age_conv3'][y])", True, 'import numpy as np\n'), ((606, 32, 606, 48), 'pathlib.Path', 'Path', ({(606, 37, 606, 47): 'known_face'}, {}), '(known_face)', False, 'from pathlib import Path\n'), ((200, 48, 200, 64), 'time.gmtime', 'gmtime', ({(200, 55, 200, 63): 'timecode'}, {}), '(timecode)', False, 'from time import gmtime\n'), ((331, 28, 331, 42), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((400, 28, 400, 42), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((514, 28, 514, 42), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((565, 22, 565, 39), 'numpy.argsort', 'np.argsort', ({(565, 33, 565, 38): 'probs'}, {}), '(probs)', True, 'import numpy as np\n'), ((577, 28, 577, 42), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((643, 20, 643, 96), 'cv2.rectangle', 'cv2.rectangle', ({(643, 34, 643, 49): 'image_to_detect', (643, 51, 643, 62): '(left, top)', (643, 64, 643, 79): '(right, bottom)', (643, 81, 643, 92): '(0, 0, 255)', (643, 94, 643, 95): '(2)'}, {}), '(image_to_detect, (left, top), (right, bottom), (0, 0, 255), 2)', False, 'import cv2\n'), ((646, 20, 646, 113), 'cv2.rectangle', 'cv2.rectangle', ({(646, 34, 646, 49): 'image_to_detect', (646, 51, 646, 70): '(left, bottom - 35)', (646, 72, 646, 87): '(right, bottom)', (646, 89, 646, 100): '(0, 0, 255)', (646, 102, 646, 112): 'cv2.FILLED'}, {}), '(image_to_detect, (left, bottom - 35), (right, bottom), (0, 0,\n 255), cv2.FILLED)', False, 'import cv2\n'), ((648, 20, 648, 109), 'cv2.putText', 'cv2.putText', ({(648, 32, 648, 47): 'image_to_detect', (648, 49, 648, 53): 'name', (648, 55, 648, 77): '(left + 6, bottom - 6)', (648, 79, 648, 83): 'font', (648, 85, 648, 88): '(1.0)', (648, 90, 648, 105): '(255, 255, 255)', (648, 107, 648, 108): '(1)'}, {}), '(image_to_detect, name, (left + 6, bottom - 6), font, 1.0, (255,\n 255, 255), 1)', False, 'import cv2\n'), ((652, 35, 652, 67), 'PIL.Image.fromarray', 'Image.fromarray', ({(652, 51, 652, 66): 'image_to_detect'}, {}), '(image_to_detect)', False, 'from PIL import Image\n'), ((655, 28, 655, 42), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((669, 28, 669, 42), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((711, 28, 711, 42), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((817, 16, 817, 47), 'PySimpleGUI.Window', 'sg.Window', ({(817, 26, 817, 46): '"""BKP Media Detector"""'}, {}), "('BKP Media Detector')", 
True, 'import PySimpleGUI as sg\n'), ((899, 28, 899, 42), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((986, 28, 986, 42), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((259, 22, 259, 44), 'tensorflow.get_default_graph', 'tf.get_default_graph', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((270, 31, 270, 53), 'tensorflow.get_default_graph', 'tf.get_default_graph', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((360, 31, 360, 54), 'numpy.asarray', 'np.asarray', ({(360, 42, 360, 53): 'image.shape'}, {}), '(image.shape)', True, 'import numpy as np\n'), ((370, 40, 370, 65), 'numpy.squeeze', 'np.squeeze', ({(370, 51, 370, 64): 'detectedFaces'}, {}), '(detectedFaces)', True, 'import numpy as np\n'), ((371, 29, 371, 56), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((372, 32, 372, 63), 'numpy.maximum', 'np.maximum', ({(372, 43, 372, 59): 'detectedFaces[0]', (372, 61, 372, 62): '0'}, {}), '(detectedFaces[0], 0)', True, 'import numpy as np\n'), ((373, 32, 373, 63), 'numpy.maximum', 'np.maximum', ({(373, 43, 373, 59): 'detectedFaces[1]', (373, 61, 373, 62): '0'}, {}), '(detectedFaces[1], 0)', True, 'import numpy as np\n'), ((374, 32, 374, 73), 'numpy.minimum', 'np.minimum', ({(374, 43, 374, 59): 'detectedFaces[2]', (374, 61, 374, 72): 'img_size[1]'}, {}), '(detectedFaces[2], img_size[1])', True, 'import numpy as np\n'), ((375, 32, 375, 73), 'numpy.minimum', 'np.minimum', ({(375, 43, 375, 59): 'detectedFaces[3]', (375, 61, 375, 72): 'img_size[0]'}, {}), '(detectedFaces[3], img_size[0])', True, 'import numpy as np\n'), ((453, 24, 453, 48), 'numpy.round', 'np.round', ({(453, 33, 453, 47): 'new_w / aspect'}, {}), '(new_w / aspect)', True, 'import numpy as np\n'), ((1024, 48, 1024, 62), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((203, 23, 203, 48), 'numpy.array', 'np.array', ({(203, 32, 203, 47): 'extracted_frame'}, {}), '(extracted_frame)', True, 'import numpy as np\n'), ((366, 50, 366, 75), 'numpy.squeeze', 'np.squeeze', ({(366, 61, 366, 74): 'detectedFaces'}, {}), '(detectedFaces)', True, 'import numpy as np\n'), ((455, 35, 455, 53), 'numpy.floor', 'np.floor', ({(455, 44, 455, 52): 'pad_vert'}, {}), '(pad_vert)', True, 'import numpy as np\n'), ((455, 67, 455, 84), 'numpy.ceil', 'np.ceil', ({(455, 75, 455, 83): 'pad_vert'}, {}), '(pad_vert)', True, 'import numpy as np\n'), ((459, 24, 459, 48), 'numpy.round', 'np.round', ({(459, 33, 459, 47): 'new_h * aspect'}, {}), '(new_h * aspect)', True, 'import numpy as np\n'), ((608, 28, 608, 42), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((708, 33, 708, 48), 'pathlib.Path', 'Path', ({(708, 38, 708, 47): 'audiopath'}, {}), '(audiopath)', False, 'from pathlib import Path\n'), ((1025, 55, 1025, 69), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((267, 43, 267, 65), 'tensorflow.get_default_graph', 'tf.get_default_graph', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((364, 54, 364, 92), 'numpy.squeeze', 'np.squeeze', ({(364, 65, 364, 91): 'detectedFaces[single_face]'}, {}), '(detectedFaces[single_face])', True, 'import numpy as np\n'), ((461, 38, 461, 56), 'numpy.floor', 'np.floor', ({(461, 47, 461, 55): 'pad_horz'}, {}), '(pad_horz)', True, 'import numpy as np\n'), ((461, 70, 461, 87), 'numpy.ceil', 'np.ceil', ({(461, 78, 461, 86): 'pad_horz'}, {}), 
'(pad_horz)', True, 'import numpy as np\n'), ((638, 37, 638, 53), 'pathlib.Path', 'Path', ({(638, 42, 638, 52): 'image_path'}, {}), '(image_path)', False, 'from pathlib import Path\n'), ((286, 72, 286, 96), 'numpy.expand_dims', 'np.expand_dims', ({(286, 87, 286, 92): 'image', (286, 94, 286, 95): '0'}, {}), '(image, 0)', True, 'import numpy as np\n'), ((500, 23, 500, 54), 'pathlib.Path', 'Path', ({(500, 28, 500, 53): 'image_paths[imagelist[y]]'}, {}), '(image_paths[imagelist[y]])', False, 'from pathlib import Path\n'), ((574, 41, 574, 61), 'pathlib.Path', 'Path', ({(574, 46, 574, 60): 'image_paths[i]'}, {}), '(image_paths[i])', False, 'from pathlib import Path\n'), ((315, 40, 315, 54), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((650, 61, 650, 77), 'pathlib.Path', 'Path', ({(650, 66, 650, 76): 'image_path'}, {}), '(image_path)', False, 'from pathlib import Path\n'), ((245, 36, 245, 50), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((308, 53, 308, 69), 'pathlib.Path', 'Path', ({(308, 58, 308, 68): 'image_path'}, {}), '(image_path)', False, 'from pathlib import Path\n'), ((387, 35, 387, 59), 'pathlib.Path', 'Path', ({(387, 40, 387, 58): 'image_paths[index]'}, {}), '(image_paths[index])', False, 'from pathlib import Path\n')] |
stevenwalton/Retro-Learner | src/BruteForce.py | 74586c57b5dd5f6e82abaff99344285731f1fc56 | import time
import retro
import FrameSkip
import TimeLimit
import Brute
class BruteForce():
def __init__(self,
game='Airstriker-Genesis',
max_episode_steps=4500,
timestep_limit=100_000_000,
state=retro.State.DEFAULT,
scenario=None,
save=False,
savename="best.bk2",
fs_skip=4,
render=False,
time=False,
):
self.game = game
self.max_episode_steps = max_episode_steps
self.timestep_limit = timestep_limit
self.state = state
self.scenario = scenario
self.save=save
self.savename = savename
self.fs_skip=fs_skip
self.render=render
self.time=time
if ".bk2" not in self.savename[-4:]:
self.savename += ".bk2"
self.timesteps = 0
self.best_reward = float('-inf')
self.env = retro.make(game=game,
state=state,
use_restricted_actions=retro.Actions.DISCRETE,
scenario=scenario)
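        # Wrap the raw environment: Frameskip repeats each chosen action for fs_skip frames, TimeLimit ends an episode after max_episode_steps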
self.env = FrameSkip.Frameskip(self.env, skip=self.fs_skip)
self.env = TimeLimit.TimeLimit(self.env, max_episode_steps=self.max_episode_steps)
def start(self):
brute = Brute.Brute(self.env, max_episode_steps=self.max_episode_steps,render=self.render)
if self.time:
startTime = time.time()
while True:
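            # brute.run() plays one episode and returns the action sequence tried and its total reward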
acts, reward = brute.run()
self.timesteps += len(acts)
if reward > self.best_reward:
print(f"New best reward {reward} from {self.best_reward}")
if self.time:
print(f"Elapsed time {time.time() - startTime}")
self.best_reward = reward
if (self.save):
self.env.unwrapped.record_movie(self.savename)
self.env.reset()
for act in acts:
self.env.step(act)
self.env.unwrapped.stop_record()
if self.timesteps > self.timestep_limit:
print("Timed out")
break
| [((38, 19, 41, 48), 'retro.make', 'retro.make', (), '', False, 'import retro\n'), ((42, 19, 42, 67), 'FrameSkip.Frameskip', 'FrameSkip.Frameskip', (), '', False, 'import FrameSkip\n'), ((43, 19, 43, 90), 'TimeLimit.TimeLimit', 'TimeLimit.TimeLimit', (), '', False, 'import TimeLimit\n'), ((46, 16, 46, 98), 'Brute.Brute', 'Brute.Brute', (), '', False, 'import Brute\n'), ((48, 24, 48, 35), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((55, 42, 55, 53), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
yakhyo/PyTorch-Tutorials | tutorials/04-advanced/03-super-resolution-onnx/main.py | 163287bc735b09c366dbdfa3989e81acaef6fa1f | import io
import numpy as np
import torch.utils.model_zoo as model_zoo
import torch.onnx
import torch.nn as nn
import torch.nn.init as init
# ================================================================ #
# Building the Model #
# ================================================================ #
class SuperResolutionNet(nn.Module):
def __init__(self, upscale_factor, inplace=False):
super(SuperResolutionNet, self).__init__()
self.relu = nn.ReLU(inplace=inplace)
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(in_channels=32, out_channels=upscale_factor ** 2, kernel_size=3, padding=1)
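        # PixelShuffle rearranges (C*r^2, H, W) activations into (C, H*r, W*r), producing the r-times upscaled output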
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
self._initialize_weights()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
return x
def _initialize_weights(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv4.weight)
# Creating an instance from SuperResolutionNet
net = SuperResolutionNet(upscale_factor=3)
# ================================================================ #
# Downloading Pretrained Weights #
# ================================================================ #
model_url = 'https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth'
# Initialize model with the pretrained weights
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net.load_state_dict(model_zoo.load_url(model_url, map_location=device))
net.eval() # Changing to eval mode to save it onnx format
# onnx input shape: x.shape : (batch_size=1, channel=1, H, W)
# The model expects the Y component of the YCbCr of an image as an input so it has one channel
x = torch.randn(1, 1, 224, 224, requires_grad=True)
torch_out = net(x)
# Export the onnx model
torch.onnx.export(net,                     # model being run
x, # model input (or a tuple for multiple inputs)
"super_resolution.onnx", # where to save the model
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
# ================================================================ #
# Loading ONNX model #
# ================================================================ #
import onnx
import onnxruntime
onnx_model = onnx.load("super_resolution.onnx")
onnx.checker.check_model(onnx_model)
ort_session = onnxruntime.InferenceSession("super_resolution.onnx")
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
print("Exported model has been tested with ONNXRuntime, and the result looks good!")
# ================================================================ #
# Reading Original Image and Feed it to Model #
# ================================================================ #
from PIL import Image
import torchvision.transforms as transforms
img = Image.open("../../../cat_224x224.jpg")
resize = transforms.Resize([224, 224])
img = resize(img)
# The model expects the Y component of the YCbCr of an image as an input
img_ycbcr = img.convert('YCbCr')
img_y, img_cb, img_cr = img_ycbcr.split()
to_tensor = transforms.ToTensor()
img_y = to_tensor(img_y)
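# Add a batch dimension: the tensor becomes (1, 1, 224, 224)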
img_y.unsqueeze_(0)
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(img_y)}
ort_outs = ort_session.run(None, ort_inputs)
img_out_y = ort_outs[0]
img_out_y = Image.fromarray(np.uint8((img_out_y[0] * 255.0).clip(0, 255)[0]), mode='L')
# get the output image follow post-processing step from PyTorch implementation
output = Image.merge(
"YCbCr",
[img_out_y, img_cb.resize(img_out_y.size, Image.BICUBIC), img_cr.resize(img_out_y.size, Image.BICUBIC), ]
).convert("RGB")
# Save the image, we will compare this with the output image from mobile device
output.save("../../../cat_superres_with_ort.jpg")
| [((80, 13, 80, 47), 'onnx.load', 'onnx.load', ({(80, 23, 80, 46): '"""super_resolution.onnx"""'}, {}), "('super_resolution.onnx')", False, 'import onnx\n'), ((81, 0, 81, 36), 'onnx.checker.check_model', 'onnx.checker.check_model', ({(81, 25, 81, 35): 'onnx_model'}, {}), '(onnx_model)', False, 'import onnx\n'), ((83, 14, 83, 67), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', ({(83, 43, 83, 66): '"""super_resolution.onnx"""'}, {}), "('super_resolution.onnx')", False, 'import onnxruntime\n'), ((106, 6, 106, 44), 'PIL.Image.open', 'Image.open', ({(106, 17, 106, 43): '"""../../../cat_224x224.jpg"""'}, {}), "('../../../cat_224x224.jpg')", False, 'from PIL import Image\n'), ((108, 9, 108, 38), 'torchvision.transforms.Resize', 'transforms.Resize', ({(108, 27, 108, 37): '[224, 224]'}, {}), '([224, 224])', True, 'import torchvision.transforms as transforms\n'), ((116, 12, 116, 33), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', True, 'import torchvision.transforms as transforms\n'), ((53, 20, 53, 70), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (), '', True, 'import torch.utils.model_zoo as model_zoo\n'), ((19, 20, 19, 44), 'torch.nn.ReLU', 'nn.ReLU', (), '', True, 'import torch.nn as nn\n'), ((20, 21, 20, 88), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((21, 21, 21, 89), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((22, 21, 22, 89), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((23, 21, 23, 106), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((24, 29, 24, 60), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', ({(24, 45, 24, 59): 'upscale_factor'}, {}), '(upscale_factor)', True, 'import torch.nn as nn\n'), ((39, 8, 39, 43), 'torch.nn.init.orthogonal_', 'init.orthogonal_', ({(39, 25, 39, 42): 'self.conv4.weight'}, {}), '(self.conv4.weight)', True, 'import torch.nn.init as init\n'), ((36, 44, 36, 71), 'torch.nn.init.calculate_gain', 'init.calculate_gain', ({(36, 64, 36, 70): '"""relu"""'}, {}), "('relu')", True, 'import torch.nn.init as init\n'), ((37, 44, 37, 71), 'torch.nn.init.calculate_gain', 'init.calculate_gain', ({(37, 64, 37, 70): '"""relu"""'}, {}), "('relu')", True, 'import torch.nn.init as init\n'), ((38, 44, 38, 71), 'torch.nn.init.calculate_gain', 'init.calculate_gain', ({(38, 64, 38, 70): '"""relu"""'}, {}), "('relu')", True, 'import torch.nn.init as init\n')] |
revvsales/python-docx-1 | features/steps/section.py | 5b3ff2b828cc30f1567cb1682a8cb399143732d7 | # encoding: utf-8
"""
Step implementations for section-related features
"""
from __future__ import absolute_import, print_function, unicode_literals
from behave import given, then, when
from docx import Document
from docx.enum.section import WD_ORIENT, WD_SECTION
from docx.section import Section
from docx.shared import Inches
from helpers import test_docx
# given ====================================================
@given("a Section object as section")
def given_a_Section_object_as_section(context):
context.section = Document(test_docx("sct-section-props")).sections[-1]
@given("a Section object {with_or_without} a distinct first-page header as section")
def given_a_Section_object_with_or_without_first_page_header(context, with_or_without):
section_idx = {"with": 1, "without": 0}[with_or_without]
context.section = Document(test_docx("sct-first-page-hdrftr")).sections[section_idx]
@given('a section collection containing 3 sections')
def given_a_section_collection_containing_3_sections(context):
document = Document(test_docx('doc-access-sections'))
context.sections = document.sections
@given('a section having known page dimension')
def given_a_section_having_known_page_dimension(context):
document = Document(test_docx('sct-section-props'))
context.section = document.sections[-1]
@given('a section having known page margins')
def given_a_section_having_known_page_margins(context):
document = Document(test_docx('sct-section-props'))
context.section = document.sections[0]
@given('a section having start type {start_type}')
def given_a_section_having_start_type(context, start_type):
section_idx = {
'CONTINUOUS': 0,
'NEW_PAGE': 1,
'ODD_PAGE': 2,
'EVEN_PAGE': 3,
'NEW_COLUMN': 4,
}[start_type]
document = Document(test_docx('sct-section-props'))
context.section = document.sections[section_idx]
@given('a section known to have {orientation} orientation')
def given_a_section_having_known_orientation(context, orientation):
section_idx = {
'landscape': 0,
'portrait': 1
}[orientation]
document = Document(test_docx('sct-section-props'))
context.section = document.sections[section_idx]
# when =====================================================
@when("I assign {bool_val} to section.different_first_page_header_footer")
def when_I_assign_value_to_section_different_first_page_hdrftr(context, bool_val):
context.section.different_first_page_header_footer = eval(bool_val)
@when('I set the {margin_side} margin to {inches} inches')
def when_I_set_the_margin_side_length(context, margin_side, inches):
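    # Map the margin side named in the scenario step to the corresponding Section attribute.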
prop_name = {
'left': 'left_margin',
'right': 'right_margin',
'top': 'top_margin',
'bottom': 'bottom_margin',
'gutter': 'gutter',
'header': 'header_distance',
'footer': 'footer_distance',
}[margin_side]
new_value = Inches(float(inches))
setattr(context.section, prop_name, new_value)
@when('I set the section orientation to {orientation}')
def when_I_set_the_section_orientation(context, orientation):
new_orientation = {
'WD_ORIENT.PORTRAIT': WD_ORIENT.PORTRAIT,
'WD_ORIENT.LANDSCAPE': WD_ORIENT.LANDSCAPE,
'None': None,
}[orientation]
context.section.orientation = new_orientation
@when('I set the section page height to {y} inches')
def when_I_set_the_section_page_height_to_y_inches(context, y):
context.section.page_height = Inches(float(y))
@when('I set the section page width to {x} inches')
def when_I_set_the_section_page_width_to_x_inches(context, x):
context.section.page_width = Inches(float(x))
@when('I set the section start type to {start_type}')
def when_I_set_the_section_start_type_to_start_type(context, start_type):
new_start_type = {
'None': None,
'CONTINUOUS': WD_SECTION.CONTINUOUS,
'EVEN_PAGE': WD_SECTION.EVEN_PAGE,
'NEW_COLUMN': WD_SECTION.NEW_COLUMN,
'NEW_PAGE': WD_SECTION.NEW_PAGE,
'ODD_PAGE': WD_SECTION.ODD_PAGE,
}[start_type]
context.section.start_type = new_start_type
# then =====================================================
@then('I can access a section by index')
def then_I_can_access_a_section_by_index(context):
sections = context.sections
for idx in range(3):
section = sections[idx]
assert isinstance(section, Section)
@then('I can iterate over the sections')
def then_I_can_iterate_over_the_sections(context):
sections = context.sections
actual_count = 0
for section in sections:
actual_count += 1
assert isinstance(section, Section)
assert actual_count == 3
@then('len(sections) is 3')
def then_len_sections_is_3(context):
sections = context.sections
assert len(sections) == 3, (
'expected len(sections) of 3, got %s' % len(sections)
)
@then("section.different_first_page_header_footer is {bool_val}")
def then_section_different_first_page_header_footer_is(context, bool_val):
actual = context.section.different_first_page_header_footer
expected = eval(bool_val)
assert actual == expected, (
"section.different_first_page_header_footer is %s" % actual
)
@then("section.even_page_footer is a _Footer object")
def then_section_even_page_footer_is_a_Footer_object(context):
actual = type(context.section.even_page_footer).__name__
expected = "_Footer"
assert actual == expected, "section.even_page_footer is a %s object" % actual
@then("section.even_page_header is a _Header object")
def then_section_even_page_header_is_a_Header_object(context):
actual = type(context.section.even_page_header).__name__
expected = "_Header"
assert actual == expected, "section.even_page_header is a %s object" % actual
@then("section.first_page_footer is a _Footer object")
def then_section_first_page_footer_is_a_Footer_object(context):
actual = type(context.section.first_page_footer).__name__
expected = "_Footer"
assert actual == expected, "section.first_page_footer is a %s object" % actual
@then("section.first_page_header is a _Header object")
def then_section_first_page_header_is_a_Header_object(context):
actual = type(context.section.first_page_header).__name__
expected = "_Header"
assert actual == expected, "section.first_page_header is a %s object" % actual
@then("section.footer is a _Footer object")
def then_section_footer_is_a_Footer_object(context):
actual = type(context.section.footer).__name__
expected = "_Footer"
assert actual == expected, "section.footer is a %s object" % actual
@then("section.header is a _Header object")
def then_section_header_is_a_Header_object(context):
actual = type(context.section.header).__name__
expected = "_Header"
assert actual == expected, "section.header is a %s object" % actual
@then("section.{propname}.is_linked_to_previous is True")
def then_section_hdrftr_prop_is_linked_to_previous_is_True(context, propname):
actual = getattr(context.section, propname).is_linked_to_previous
expected = True
assert actual == expected, (
"section.%s.is_linked_to_previous is %s" % (propname, actual)
)
@then('the reported {margin_side} margin is {inches} inches')
def then_the_reported_margin_is_inches(context, margin_side, inches):
prop_name = {
'left': 'left_margin',
'right': 'right_margin',
'top': 'top_margin',
'bottom': 'bottom_margin',
'gutter': 'gutter',
'header': 'header_distance',
'footer': 'footer_distance',
}[margin_side]
expected_value = Inches(float(inches))
actual_value = getattr(context.section, prop_name)
assert actual_value == expected_value
@then('the reported page orientation is {orientation}')
def then_the_reported_page_orientation_is_orientation(context, orientation):
expected_value = {
'WD_ORIENT.LANDSCAPE': WD_ORIENT.LANDSCAPE,
'WD_ORIENT.PORTRAIT': WD_ORIENT.PORTRAIT,
}[orientation]
assert context.section.orientation == expected_value
@then('the reported page width is {x} inches')
def then_the_reported_page_width_is_width(context, x):
assert context.section.page_width == Inches(float(x))
@then('the reported page height is {y} inches')
def then_the_reported_page_height_is_11_inches(context, y):
assert context.section.page_height == Inches(float(y))
@then('the reported section start type is {start_type}')
def then_the_reported_section_start_type_is_type(context, start_type):
expected_start_type = {
'CONTINUOUS': WD_SECTION.CONTINUOUS,
'EVEN_PAGE': WD_SECTION.EVEN_PAGE,
'NEW_COLUMN': WD_SECTION.NEW_COLUMN,
'NEW_PAGE': WD_SECTION.NEW_PAGE,
'ODD_PAGE': WD_SECTION.ODD_PAGE,
}[start_type]
assert context.section.start_type == expected_start_type
| [((21, 1, 21, 37), 'behave.given', 'given', ({(21, 7, 21, 36): '"""a Section object as section"""'}, {}), "('a Section object as section')", False, 'from behave import given, then, when\n'), ((26, 1, 26, 84), 'behave.given', 'given', ({(26, 7, 26, 83): '"""a Section object {with_or_without} a distinct first-page header as section"""'}, {}), "(\n 'a Section object {with_or_without} a distinct first-page header as section'\n )", False, 'from behave import given, then, when\n'), ((32, 1, 32, 52), 'behave.given', 'given', ({(32, 7, 32, 51): '"""a section collection containing 3 sections"""'}, {}), "('a section collection containing 3 sections')", False, 'from behave import given, then, when\n'), ((38, 1, 38, 47), 'behave.given', 'given', ({(38, 7, 38, 46): '"""a section having known page dimension"""'}, {}), "('a section having known page dimension')", False, 'from behave import given, then, when\n'), ((44, 1, 44, 45), 'behave.given', 'given', ({(44, 7, 44, 44): '"""a section having known page margins"""'}, {}), "('a section having known page margins')", False, 'from behave import given, then, when\n'), ((50, 1, 50, 50), 'behave.given', 'given', ({(50, 7, 50, 49): '"""a section having start type {start_type}"""'}, {}), "('a section having start type {start_type}')", False, 'from behave import given, then, when\n'), ((63, 1, 63, 59), 'behave.given', 'given', ({(63, 7, 63, 58): '"""a section known to have {orientation} orientation"""'}, {}), "('a section known to have {orientation} orientation')", False, 'from behave import given, then, when\n'), ((75, 1, 75, 74), 'behave.when', 'when', ({(75, 6, 75, 73): '"""I assign {bool_val} to section.different_first_page_header_footer"""'}, {}), "('I assign {bool_val} to section.different_first_page_header_footer')", False, 'from behave import given, then, when\n'), ((80, 1, 80, 58), 'behave.when', 'when', ({(80, 6, 80, 57): '"""I set the {margin_side} margin to {inches} inches"""'}, {}), "('I set the {margin_side} margin to {inches} inches')", False, 'from behave import given, then, when\n'), ((95, 1, 95, 55), 'behave.when', 'when', ({(95, 6, 95, 54): '"""I set the section orientation to {orientation}"""'}, {}), "('I set the section orientation to {orientation}')", False, 'from behave import given, then, when\n'), ((105, 1, 105, 52), 'behave.when', 'when', ({(105, 6, 105, 51): '"""I set the section page height to {y} inches"""'}, {}), "('I set the section page height to {y} inches')", False, 'from behave import given, then, when\n'), ((110, 1, 110, 51), 'behave.when', 'when', ({(110, 6, 110, 50): '"""I set the section page width to {x} inches"""'}, {}), "('I set the section page width to {x} inches')", False, 'from behave import given, then, when\n'), ((115, 1, 115, 53), 'behave.when', 'when', ({(115, 6, 115, 52): '"""I set the section start type to {start_type}"""'}, {}), "('I set the section start type to {start_type}')", False, 'from behave import given, then, when\n'), ((130, 1, 130, 40), 'behave.then', 'then', ({(130, 6, 130, 39): '"""I can access a section by index"""'}, {}), "('I can access a section by index')", False, 'from behave import given, then, when\n'), ((138, 1, 138, 40), 'behave.then', 'then', ({(138, 6, 138, 39): '"""I can iterate over the sections"""'}, {}), "('I can iterate over the sections')", False, 'from behave import given, then, when\n'), ((148, 1, 148, 27), 'behave.then', 'then', ({(148, 6, 148, 26): '"""len(sections) is 3"""'}, {}), "('len(sections) is 3')", False, 'from behave import given, then, when\n'), ((156, 1, 156, 
65), 'behave.then', 'then', ({(156, 6, 156, 64): '"""section.different_first_page_header_footer is {bool_val}"""'}, {}), "('section.different_first_page_header_footer is {bool_val}')", False, 'from behave import given, then, when\n'), ((165, 1, 165, 53), 'behave.then', 'then', ({(165, 6, 165, 52): '"""section.even_page_footer is a _Footer object"""'}, {}), "('section.even_page_footer is a _Footer object')", False, 'from behave import given, then, when\n'), ((172, 1, 172, 53), 'behave.then', 'then', ({(172, 6, 172, 52): '"""section.even_page_header is a _Header object"""'}, {}), "('section.even_page_header is a _Header object')", False, 'from behave import given, then, when\n'), ((179, 1, 179, 54), 'behave.then', 'then', ({(179, 6, 179, 53): '"""section.first_page_footer is a _Footer object"""'}, {}), "('section.first_page_footer is a _Footer object')", False, 'from behave import given, then, when\n'), ((186, 1, 186, 54), 'behave.then', 'then', ({(186, 6, 186, 53): '"""section.first_page_header is a _Header object"""'}, {}), "('section.first_page_header is a _Header object')", False, 'from behave import given, then, when\n'), ((193, 1, 193, 43), 'behave.then', 'then', ({(193, 6, 193, 42): '"""section.footer is a _Footer object"""'}, {}), "('section.footer is a _Footer object')", False, 'from behave import given, then, when\n'), ((200, 1, 200, 43), 'behave.then', 'then', ({(200, 6, 200, 42): '"""section.header is a _Header object"""'}, {}), "('section.header is a _Header object')", False, 'from behave import given, then, when\n'), ((207, 1, 207, 57), 'behave.then', 'then', ({(207, 6, 207, 56): '"""section.{propname}.is_linked_to_previous is True"""'}, {}), "('section.{propname}.is_linked_to_previous is True')", False, 'from behave import given, then, when\n'), ((216, 1, 216, 61), 'behave.then', 'then', ({(216, 6, 216, 60): '"""the reported {margin_side} margin is {inches} inches"""'}, {}), "('the reported {margin_side} margin is {inches} inches')", False, 'from behave import given, then, when\n'), ((232, 1, 232, 55), 'behave.then', 'then', ({(232, 6, 232, 54): '"""the reported page orientation is {orientation}"""'}, {}), "('the reported page orientation is {orientation}')", False, 'from behave import given, then, when\n'), ((241, 1, 241, 46), 'behave.then', 'then', ({(241, 6, 241, 45): '"""the reported page width is {x} inches"""'}, {}), "('the reported page width is {x} inches')", False, 'from behave import given, then, when\n'), ((246, 1, 246, 47), 'behave.then', 'then', ({(246, 6, 246, 46): '"""the reported page height is {y} inches"""'}, {}), "('the reported page height is {y} inches')", False, 'from behave import given, then, when\n'), ((251, 1, 251, 56), 'behave.then', 'then', ({(251, 6, 251, 55): '"""the reported section start type is {start_type}"""'}, {}), "('the reported section start type is {start_type}')", False, 'from behave import given, then, when\n'), ((34, 24, 34, 56), 'helpers.test_docx', 'test_docx', ({(34, 34, 34, 55): '"""doc-access-sections"""'}, {}), "('doc-access-sections')", False, 'from helpers import test_docx\n'), ((40, 24, 40, 54), 'helpers.test_docx', 'test_docx', ({(40, 34, 40, 53): '"""sct-section-props"""'}, {}), "('sct-section-props')", False, 'from helpers import test_docx\n'), ((46, 24, 46, 54), 'helpers.test_docx', 'test_docx', ({(46, 34, 46, 53): '"""sct-section-props"""'}, {}), "('sct-section-props')", False, 'from helpers import test_docx\n'), ((59, 24, 59, 54), 'helpers.test_docx', 'test_docx', ({(59, 34, 59, 53): '"""sct-section-props"""'}, {}), 
"('sct-section-props')", False, 'from helpers import test_docx\n'), ((69, 24, 69, 54), 'helpers.test_docx', 'test_docx', ({(69, 34, 69, 53): '"""sct-section-props"""'}, {}), "('sct-section-props')", False, 'from helpers import test_docx\n'), ((23, 31, 23, 61), 'helpers.test_docx', 'test_docx', ({(23, 41, 23, 60): '"""sct-section-props"""'}, {}), "('sct-section-props')", False, 'from helpers import test_docx\n'), ((29, 31, 29, 65), 'helpers.test_docx', 'test_docx', ({(29, 41, 29, 64): '"""sct-first-page-hdrftr"""'}, {}), "('sct-first-page-hdrftr')", False, 'from helpers import test_docx\n')] |
seberg/scipy | scipy/sparse/csgraph/_laplacian.py | d8081cdd40ed8cbebd5905c0ad6c323c57d5da6e | """
Laplacian of a compressed-sparse graph
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD
import numpy as np
from scipy.sparse import isspmatrix, coo_matrix
###############################################################################
# Graph laplacian
def laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
Examples
--------
>>> from scipy.sparse import csgraph
>>> G = np.arange(5) * np.arange(5)[:, np.newaxis]
>>> G
array([[ 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4],
[ 0, 2, 4, 6, 8],
[ 0, 3, 6, 9, 12],
[ 0, 4, 8, 12, 16]])
>>> csgraph.laplacian(G, normed=False)
array([[ 0, 0, 0, 0, 0],
[ 0, 9, -2, -3, -4],
[ 0, -2, 16, -6, -8],
[ 0, -3, -6, 21, -12],
[ 0, -4, -8, -12, 24]])
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = csgraph.astype(np.float)
if isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(
diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = coo_matrix((new_data, (new_row, new_col)), shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
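        # Symmetric normalization: L = I - D^{-1/2} W D^{-1/2}.
        # Isolated nodes (degree 0) are given a dummy degree of 1 so the divisions
        # below are well defined; their diagonal entries end up as 0.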
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
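        # Same normalization as the sparse path, with isolated nodes again
        # handled by substituting a dummy degree of 1.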
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = 1 - w_zeros
else:
lap.flat[::n_nodes + 1] = w
if return_diag:
return lap, w
return lap
| [((72, 7, 72, 26), 'scipy.sparse.isspmatrix', 'isspmatrix', ({(72, 18, 72, 25): 'csgraph'}, {}), '(csgraph)', False, 'from scipy.sparse import isspmatrix, coo_matrix\n'), ((94, 18, 94, 59), 'numpy.concatenate', 'np.concatenate', ({(94, 33, 94, 58): '[lap.row, diagonal_holes]'}, {}), '([lap.row, diagonal_holes])', True, 'import numpy as np\n'), ((95, 18, 95, 59), 'numpy.concatenate', 'np.concatenate', ({(95, 33, 95, 58): '[lap.col, diagonal_holes]'}, {}), '([lap.col, diagonal_holes])', True, 'import numpy as np\n'), ((96, 14, 96, 73), 'scipy.sparse.coo_matrix', 'coo_matrix', (), '', False, 'from scipy.sparse import isspmatrix, coo_matrix\n'), ((102, 12, 102, 22), 'numpy.sqrt', 'np.sqrt', ({(102, 20, 102, 21): 'w'}, {}), '(w)', True, 'import numpy as np\n'), ((118, 11, 118, 28), 'numpy.asarray', 'np.asarray', ({(118, 22, 118, 27): 'graph'}, {}), '(graph)', True, 'import numpy as np\n'), ((124, 12, 124, 22), 'numpy.sqrt', 'np.sqrt', ({(124, 20, 124, 21): 'w'}, {}), '(w)', True, 'import numpy as np\n'), ((68, 19, 68, 55), 'numpy.issubdtype', 'np.issubdtype', ({(68, 33, 68, 46): 'csgraph.dtype', (68, 48, 68, 54): 'np.int'}, {}), '(csgraph.dtype, np.int)', True, 'import numpy as np\n'), ((69, 22, 69, 59), 'numpy.issubdtype', 'np.issubdtype', ({(69, 36, 69, 49): 'csgraph.dtype', (69, 51, 69, 58): 'np.uint'}, {}), '(csgraph.dtype, np.uint)', True, 'import numpy as np\n')] |
kuasha/peregrine | samples/barebone/settings.py | b3dd92146d26fe9e4ea589868431b590324b47d1 | import os
import logging
from collections import namedtuple
from Crypto.PublicKey import RSA
from tornado import gen
from tornado import concurrent
from cosmos.rbac.object import *
from cosmos.service import OBSERVER_PROCESSOR
DEBUG = True
DB_HOST = "127.0.0.1"
DB_NAME = "cosmos"
DB_PORT = 27017
DB_USER_NAME = None
DB_USER_PASSWORD = None
LOG_DB_HOST = "127.0.0.1"
LOG_DB_NAME = "cosmos"
LOG_COL_NAME = "log"
LOG_DB_PORT = 27017
LOG_LEVEL = logging.DEBUG
LOG_DB_USER_NAME = None
LOG_DB_USER_PASSWORD = None
STATIC_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "app")
TEMPLATE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates")
INDEX_HTML_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "app/index.html")
LOGIN_HTML_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates/login.html")
WEB_SERVER_LISTEN_PORT = 8080
DB_CHANGE_PROCESSOR_ENDPOINT_FORMAT = "http://localhost:{0}/handlechange"
#TODO: You MUST change the following values
COOKIE_SECRET = "+8/YqtEUQfiYLUdO2iJ2OyzHHFSADEuKvKYwFqemFas="
HMAC_KEY = "+8/YqtEUQfiYLUdO2iJ2OyzHIFSAKEuKvKYwFqemFas="
facebook_client_id='000000000000000'
facebook_client_secret='00000000000000000000000000000000'
facebook_scope = "email,public_profile,user_friends"
facebook_redirect_uri = None
DEFAULT_LOGIN_NEXT_URI = "/"
"""
# pip install pycrypto for Crypto
# then from python console generate private_pem and public_pem and assign to SERVICE_PRIVATE_KEY and SERVICE_PUBLIC_KEY
import Crypto.PublicKey.RSA as RSA
key = RSA.generate(2048)
private_pem = key.exportKey()
public_pem = key.publickey().exportKey()
"""
# TODO: set both keys below. Private key backup must be kept in a secure place and should never be shared
# If the private key is compromised, this service and all other services that trust it will be compromised
# The public key is meant to be shared publicly for verification
SERVICE_PRIVATE_KEY = None
SERVICE_PUBLIC_KEY = None
directory_listing_allowed = True
CONFIGURE_LOG = False
START_WEB_SERVER = True
START_OBJECT_CHANGE_MONITOR = False
GOOGLE_OAUTH2_CLIENT_ID = None
GOOGLE_OAUTH2_CLIENT_SECRET = None
GOOGLE_OAUTH2_REDIRECT_URI = None
GITHUB_CLIENT_ID = None
GITHUB_CLIENT_SECRET = None
GITHUB_OAUTH2_CALLBACK_URI = None
USERS_IDENTITY_COL_NAME = "cosmos.users.identity"
USERS_PROFILE_FB_COL_NAME = "cosmos.users.profile.facebook"
USERS_FB_FRIENDS_COL_NAME = "cosmos.users.facebook.friends"
login_url = "/login/"
OAUTH2_SERVICE_URL = r"/(?P<tenant_id>[^\/]+)/oauth2/(?P<function>[^\/]+)/"
OAUTH2_PRIVATE_KEY_PEM = b'-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAl0RIYISOe+9F8dRkm+XQrdaVsn/d3GjufnBnFARRgceu+E6q\nWLlptI5arhckFyXjDOAUEuMnOwmISfeXHrIIp4BU6RMjqRw6ciaIhI7e3LSn5fQ7\nOwCywUaHlUkyq+zQynfH77lUC95YumyUQzGVfdiwQw8XZZYDo2wAFMKJa8heo38Z\nQ0HT788VrcuSa1f4PY9i/wRHXF+xp/9NWUE7wER8eNJjqKxkm0EUKYuB23vUFLHh\n8PG7DiATUlCCpV5txhHcNXa2iEoOGecdWg8Yk5Qs2Gq9aqacJGcgfFK9DN+2/yLn\nFEj+xMVPhB2ynILoJ9N+lfA3TE6nWVKiuriXBQIDAQABAoIBAQCAX2CVGKnbH+ra\nGofvjg+VGCEexUlBvoN4Jmg0Ip4RZ6dj70690UyWAKGQUO89/dc8nAYtKT2n6qUR\nMN+9GxYhINXun2GKKPyo127QIHeeEmrSynxhzGvnfrWdyesI4QcobJLvLPbYw6/F\nNlR02eWmUXj00B/pBHC+Be/jrlz1bF5Gwbw/RINzEJPOxVfaN2D31lotetx5WnV7\nXrTxR5ONpCnwbK8phH4/vQL3rv+ZJgKVhRM8uqd+auW5Lp57y36JFXb+g5SmkFo3\nq+mB2CfMkyip8zpJGDyyVo8XiI1jKieqaiimZ4zpJZwkClBzYsFmio60f9smMGYB\n+nQCX5iZAoGBAL6WtY9BSL0hIxMIwDh4C87rORMmy8ZW5sl91wdFHmjnqlc2Q2yS\n3uVwK32BvxQCTq6FXNRoqYO0xHSrrupSRTJD5KT9EoxpaGlqi1MSB6U6o7r41bSb\nhNwcjKJ40OSABZ/YzATOwq9+AfgU+pMZD+WNlzesYL+7QIPHyKXdwrPLAoGBAMsu\ntcUadzsZEmaaSW5xtouyZF5tWPadB6VZ0Gney8x6uWQ2+ZGLv0QRIxJP0f4cBTkY\nsPx5pUZuo7oaDzCaRH9cV2VJFBahsGrFqcsexVsKh8CfZEMD1PBptodD1Cialr9M\nL0RdSu+1lmcfRqxOXSlaMSHml/cqfOjfHOj3RaZvAoGAEG2LLtLwwySlElHxx6xJ\nUEekPstcSzdYY0vOihjiGybE3wmVXDl4rwwxI3tYjg/42kAylTiETA771BasWBRJ\nVKDXh4Us4R+A2X1OjxWBxTM9w7MJMK0rEZIAaUzCrL+APJwCUfPEgj35S3n7c0x4\nu0+uFiVsnXo1gGZrHCj2TGsCgYEApm3Ccos1MvFcgzLKB2+ZqWAcmsRS5N7Hjoe9\nEZtvsDSuewoU70VbDDRFWBCN3+mv1Y8GGijCWqjx79S8sIEMro5DADIWBFu5GByE\n8l5oJiTAAeYNyF7xI2RUIQRMWl4WMOgEp6kLYsKJSjryNt2Rrfe02yH5RHpHCrEH\nC0TQhn0CgYB0iyjs20bdGYYWNTMlSYPtf8LVhUktvGYyytA/sepRXUe13T87vjCc\nvD3utXPsuaBVGhloE7Dk5YHJdar4n5UcLITNJnu1TyRM4binlzbU4rByxVjclaSX\nGB0O/DCgCsgNFK+LFKf/N1EhRxwJKy+BLVWCIshsAxNv26u296I9jA==\n-----END RSA PRIVATE KEY-----'
OAUTH2_PUBLIC_KEY_PEM = b'-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAl0RIYISOe+9F8dRkm+XQ\nrdaVsn/d3GjufnBnFARRgceu+E6qWLlptI5arhckFyXjDOAUEuMnOwmISfeXHrII\np4BU6RMjqRw6ciaIhI7e3LSn5fQ7OwCywUaHlUkyq+zQynfH77lUC95YumyUQzGV\nfdiwQw8XZZYDo2wAFMKJa8heo38ZQ0HT788VrcuSa1f4PY9i/wRHXF+xp/9NWUE7\nwER8eNJjqKxkm0EUKYuB23vUFLHh8PG7DiATUlCCpV5txhHcNXa2iEoOGecdWg8Y\nk5Qs2Gq9aqacJGcgfFK9DN+2/yLnFEj+xMVPhB2ynILoJ9N+lfA3TE6nWVKiuriX\nBQIDAQAB\n-----END PUBLIC KEY-----'
OAUTH2_TOKEN_EXPIRY_SECONDS = 600
TENANT_ID = 'cosmosframework.com'
OAUTH2_TRUSTED_REDIRECT_URLS = ['http://localhost:8080/oauth2client/authorize/']
AUTH_PUBLIC_KEY_PEM_URL = r"/(?P<tenant_id>[^\/]+)/auth/key/"
#TODO: You should remove this observer processor in a production environment
def test_observer(user, object_service, object_name, data, access_type, columns = None, *args, **kwargs):
assert object_name == "test"
assert access_type == AccessType.READ or access_type == AccessType.INSERT or access_type == AccessType.UPDATE or access_type == AccessType.DELETE
logging.info("Test object observer is called with [{}, {}, {}, {}, {}, {}].".format(user, object_service, object_name, data, access_type, columns))
if AccessType.INSERT == access_type:
val = concurrent.Future()
val.set_result(data)
return (val)
if AccessType.UPDATE == access_type or AccessType.DELETE == access_type:
r = ({"error": None, "n": 1, "ok": 1, "updatedExisting": 1})
val = concurrent.Future()
val.set_result({"_id":r})
return (val)
find_one = kwargs.get("find_one", False)
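    # For READ access, emulate the object service result: a single-document future
    # when find_one is set, otherwise an empty cursor-like result set.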
if find_one:
val = concurrent.Future()
val.set_result({"_id":data})
return (val)
else:
Result = namedtuple("CosmosEmptyResultSet", "fetch_next")
val = concurrent.Future()
val.set_result(False)
return (Result(fetch_next=val))
observers = [
{
"object_name": "test",
"function": test_observer,
"access": [AccessType.READ, AccessType.INSERT, AccessType.UPDATE, AccessType.DELETE],
"type": OBSERVER_PROCESSOR
}
]
try:
from local_settings import *
except ImportError:
pass
if DB_USER_NAME and DB_USER_PASSWORD:
DATABASE_URI = "mongodb://"+ DB_USER_NAME + ":"+ DB_USER_PASSWORD +"@"+ DB_HOST+":"+str(DB_PORT)+"/"+DB_NAME
else:
DATABASE_URI = "mongodb://"+DB_HOST+":"+str(DB_PORT)
if LOG_DB_USER_NAME and LOG_DB_USER_PASSWORD:
LOG_DATABASE_URI = "mongodb://"+ LOG_DB_USER_NAME + ":"+ LOG_DB_USER_PASSWORD +"@"+ LOG_DB_HOST+":"+str(LOG_DB_PORT)+"/"+LOG_DB_NAME
else:
LOG_DATABASE_URI = "mongodb://"+ LOG_DB_HOST+":"+str(LOG_DB_PORT)
GOOGLE_OAUTH2_SETTINGS = {"key": GOOGLE_OAUTH2_CLIENT_ID, "secret": GOOGLE_OAUTH2_CLIENT_SECRET, "redirect_uri": GOOGLE_OAUTH2_REDIRECT_URI}
GITHUB_OAUTH_SETTINGS = {"client_id": GITHUB_CLIENT_ID, "secret": GITHUB_CLIENT_SECRET, "redirect_uri": GITHUB_OAUTH2_CALLBACK_URI}
| [((29, 43, 29, 69), 'os.path.realpath', 'os.path.realpath', ({(29, 60, 29, 68): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((30, 45, 30, 71), 'os.path.realpath', 'os.path.realpath', ({(30, 62, 30, 70): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((31, 47, 31, 73), 'os.path.realpath', 'os.path.realpath', ({(31, 64, 31, 72): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((32, 47, 32, 73), 'os.path.realpath', 'os.path.realpath', ({(32, 64, 32, 72): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((101, 14, 101, 33), 'tornado.concurrent.Future', 'concurrent.Future', ({}, {}), '()', False, 'from tornado import concurrent\n'), ((107, 14, 107, 33), 'tornado.concurrent.Future', 'concurrent.Future', ({}, {}), '()', False, 'from tornado import concurrent\n'), ((113, 14, 113, 33), 'tornado.concurrent.Future', 'concurrent.Future', ({}, {}), '()', False, 'from tornado import concurrent\n'), ((117, 17, 117, 65), 'collections.namedtuple', 'namedtuple', ({(117, 28, 117, 50): '"""CosmosEmptyResultSet"""', (117, 52, 117, 64): '"""fetch_next"""'}, {}), "('CosmosEmptyResultSet', 'fetch_next')", False, 'from collections import namedtuple\n'), ((118, 14, 118, 33), 'tornado.concurrent.Future', 'concurrent.Future', ({}, {}), '()', False, 'from tornado import concurrent\n')] |
A-Hilaly/zilean | zilean/system/zilean_migrator.py | 2b2e87969a0d8064e8b92b07c346a4006f93c795 | from .utils.migrations import (migrate_database_from,
migrate_machine_from,
zilean_rollback_database_backup,
zilean_rollback_machine_backup)
class ZileanMigrator(object):
pass
| [] |
DionysisChristopoulos/google-research | coltran/run.py | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ColTran: Training and Continuous Evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
from ml_collections import config_flags
import tensorflow as tf
import tensorflow_datasets as tfds
from coltran import datasets
from coltran.models import colorizer
from coltran.models import upsampler
from coltran.utils import train_utils
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=missing-docstring
# pylint: disable=not-callable
# pylint: disable=g-long-lambda
flags.DEFINE_enum('mode', 'train', [
'train', 'eval_train', 'eval_valid', 'eval_test'], 'Operation mode.')
flags.DEFINE_string('logdir', '/tmp/svt', 'Main directory for logs.')
flags.DEFINE_string('master', 'local',
'BNS name of the TensorFlow master to use.')
flags.DEFINE_enum('accelerator_type', 'GPU', ['CPU', 'GPU', 'TPU'],
'Hardware type.')
flags.DEFINE_enum('dataset', 'imagenet', ['imagenet', 'custom'], 'Dataset')
flags.DEFINE_string('data_dir', None, 'Data directory for custom images.')
flags.DEFINE_string('tpu_worker_name', 'tpu_worker', 'Name of the TPU worker.')
flags.DEFINE_string(
'pretrain_dir', None, 'Finetune from a pretrained checkpoint.')
flags.DEFINE_string('summaries_log_dir', 'summaries', 'Summaries parent.')
flags.DEFINE_integer('steps_per_summaries', 100, 'Steps per summaries.')
flags.DEFINE_integer('devices_per_worker', 1, 'Number of devices per worker.')
flags.DEFINE_integer('num_workers', 1, 'Number workers.')
config_flags.DEFINE_config_file(
'config',
default='test_configs/colorizer.py',
help_string='Training configuration file.')
FLAGS = flags.FLAGS
def restore_checkpoint(model, ema, strategy, latest_ckpt=None, optimizer=None):
if optimizer is None:
ckpt_func = functools.partial(
train_utils.create_checkpoint, models=model, ema=ema)
else:
ckpt_func = functools.partial(
train_utils.create_checkpoint, models=model, ema=ema,
optimizer=optimizer)
checkpoint = train_utils.with_strategy(ckpt_func, strategy)
if latest_ckpt:
logging.info('Restoring from pretrained directory: %s', latest_ckpt)
train_utils.with_strategy(lambda: checkpoint.restore(latest_ckpt), strategy)
return checkpoint
def is_tpu():
return FLAGS.accelerator_type == 'TPU'
def loss_on_batch(inputs, model, config, training=False):
"""Loss on a batch of inputs."""
logits, aux_output = model.get_logits(
inputs_dict=inputs, train_config=config, training=training)
loss, aux_loss_dict = model.loss(
targets=inputs, logits=logits, train_config=config, training=training,
aux_output=aux_output)
loss_factor = config.get('loss_factor', 1.0)
loss_dict = collections.OrderedDict()
loss_dict['loss'] = loss
total_loss = loss_factor * loss
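  # Each auxiliary loss is added with its own configurable weight ('<aux_key>_loss_factor').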
for aux_key, aux_loss in aux_loss_dict.items():
aux_loss_factor = config.get(f'{aux_key}_loss_factor', 1.0)
loss_dict[aux_key] = aux_loss
total_loss += aux_loss_factor * aux_loss
loss_dict['total_loss'] = total_loss
extra_info = collections.OrderedDict([
('scalar', loss_dict),
])
return total_loss, extra_info
def train_step(config,
model,
optimizer,
metrics,
ema=None,
strategy=None):
"""Training StepFn."""
def step_fn(inputs):
"""Per-Replica StepFn."""
with tf.GradientTape() as tape:
loss, extra = loss_on_batch(inputs, model, config, training=True)
scaled_loss = loss
if strategy:
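        # Scale the per-replica loss so that gradients summed across replicas
        # correspond to the mean loss over the global batch.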
scaled_loss /= float(strategy.num_replicas_in_sync)
grads = tape.gradient(scaled_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
for metric_key, metric in metrics.items():
metric.update_state(extra['scalar'][metric_key])
if ema is not None:
ema.apply(model.trainable_variables)
return loss
return train_utils.step_with_strategy(step_fn, strategy)
def build(config, batch_size, is_train=False):
optimizer = train_utils.build_optimizer(config)
ema_vars = []
downsample = config.get('downsample', False)
downsample_res = config.get('downsample_res', 64)
h, w = config.resolution
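  # Each model below is built with a dummy forward pass so that its variables
  # exist before the EMA and checkpoint objects are created.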
if config.model.name == 'coltran_core':
if downsample:
h, w = downsample_res, downsample_res
zero = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
model = colorizer.ColTranCore(config.model)
model(zero, training=is_train)
c = 1 if is_train else 3
if config.model.name == 'color_upsampler':
if downsample:
h, w = downsample_res, downsample_res
zero_slice = tf.zeros((batch_size, h, w, c), dtype=tf.int32)
zero = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
model = upsampler.ColorUpsampler(config.model)
model(zero, inputs_slice=zero_slice, training=is_train)
elif config.model.name == 'spatial_upsampler':
zero_slice = tf.zeros((batch_size, h, w, c), dtype=tf.int32)
zero = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
model = upsampler.SpatialUpsampler(config.model)
model(zero, inputs_slice=zero_slice, training=is_train)
ema_vars = model.trainable_variables
ema = train_utils.build_ema(config, ema_vars)
return model, optimizer, ema
###############################################################################
## Train.
###############################################################################
def train(logdir):
config = FLAGS.config
steps_per_write = FLAGS.steps_per_summaries
train_utils.write_config(config, logdir)
strategy, batch_size = train_utils.setup_strategy(
config, FLAGS.master,
FLAGS.devices_per_worker, FLAGS.mode, FLAGS.accelerator_type)
def input_fn(input_context=None):
read_config = None
if input_context is not None:
read_config = tfds.ReadConfig(input_context=input_context)
dataset = datasets.get_dataset(
name=FLAGS.dataset,
config=config,
batch_size=config.batch_size,
subset='train',
read_config=read_config,
data_dir=FLAGS.data_dir)
return dataset
# DATASET CREATION.
logging.info('Building dataset.')
train_dataset = train_utils.dataset_with_strategy(input_fn, strategy)
data_iterator = iter(train_dataset)
# MODEL BUILDING
logging.info('Building model.')
model, optimizer, ema = train_utils.with_strategy(
lambda: build(config, batch_size, True), strategy)
model.summary(120, print_fn=logging.info)
# METRIC CREATION.
metrics = {}
metric_keys = ['loss', 'total_loss']
metric_keys += model.metric_keys
for metric_key in metric_keys:
func = functools.partial(tf.keras.metrics.Mean, metric_key)
curr_metric = train_utils.with_strategy(func, strategy)
metrics[metric_key] = curr_metric
# CHECKPOINTING LOGIC.
if FLAGS.pretrain_dir is not None:
pretrain_ckpt = tf.train.latest_checkpoint(FLAGS.pretrain_dir)
assert pretrain_ckpt
# Load the entire model without the optimizer from the checkpoints.
restore_checkpoint(model, ema, strategy, pretrain_ckpt, optimizer=None)
# New tf.train.Checkpoint instance with a reset optimizer.
checkpoint = restore_checkpoint(
model, ema, strategy, latest_ckpt=None, optimizer=optimizer)
else:
latest_ckpt = tf.train.latest_checkpoint(logdir)
checkpoint = restore_checkpoint(
model, ema, strategy, latest_ckpt, optimizer=optimizer)
checkpoint = tf.train.CheckpointManager(
checkpoint, directory=logdir, checkpoint_name='model', max_to_keep=10)
if optimizer.iterations.numpy() == 0:
checkpoint_name = checkpoint.save()
logging.info('Saved checkpoint to %s', checkpoint_name)
train_summary_dir = os.path.join(logdir, 'train_summaries')
writer = tf.summary.create_file_writer(train_summary_dir)
start_time = time.time()
logging.info('Start Training.')
# This hack of wrapping up multiple train steps with a tf.function call
# speeds up training significantly.
# See: https://www.tensorflow.org/guide/tpu#improving_performance_by_multiple_steps_within_tffunction # pylint: disable=line-too-long
@tf.function
def train_multiple_steps(iterator, steps_per_epoch):
train_step_f = train_step(config, model, optimizer, metrics, ema,
strategy)
for _ in range(steps_per_epoch):
train_step_f(iterator)
while optimizer.iterations.numpy() < config.get('max_train_steps', 1000000):
num_train_steps = optimizer.iterations
for metric_key in metric_keys:
metrics[metric_key].reset_states()
start_run = time.time()
train_multiple_steps(data_iterator, tf.convert_to_tensor(steps_per_write))
steps_per_sec = steps_per_write / (time.time() - start_run)
with writer.as_default():
for metric_key, metric in metrics.items():
metric_np = metric.result().numpy()
tf.summary.scalar(metric_key, metric_np, step=num_train_steps)
if metric_key == 'total_loss':
logging.info('Loss: %.3f bits/dim, Speed: %.3f steps/second',
metric_np, steps_per_sec)
if time.time() - start_time > config.save_checkpoint_secs:
checkpoint_name = checkpoint.save()
logging.info('Saved checkpoint to %s', checkpoint_name)
start_time = time.time()
###############################################################################
## Evaluating.
###############################################################################
def evaluate(logdir, subset):
"""Executes the evaluation loop."""
config = FLAGS.config
strategy, batch_size = train_utils.setup_strategy(
config, FLAGS.master,
FLAGS.devices_per_worker, FLAGS.mode, FLAGS.accelerator_type)
def input_fn(_=None):
return datasets.get_dataset(
name=config.dataset,
config=config,
batch_size=config.eval_batch_size,
subset=subset)
model, optimizer, ema = train_utils.with_strategy(
lambda: build(config, batch_size, False), strategy)
metric_keys = ['loss', 'total_loss']
# metric_keys += model.metric_keys
metrics = {}
for metric_key in metric_keys:
func = functools.partial(tf.keras.metrics.Mean, metric_key)
curr_metric = train_utils.with_strategy(func, strategy)
metrics[metric_key] = curr_metric
checkpoints = train_utils.with_strategy(
lambda: train_utils.create_checkpoint(model, optimizer, ema),
strategy)
dataset = train_utils.dataset_with_strategy(input_fn, strategy)
def step_fn(batch):
_, extra = loss_on_batch(batch, model, config, training=False)
for metric_key in metric_keys:
curr_metric = metrics[metric_key]
curr_scalar = extra['scalar'][metric_key]
curr_metric.update_state(curr_scalar)
num_examples = config.eval_num_examples
eval_step = train_utils.step_with_strategy(step_fn, strategy)
ckpt_path = None
wait_max = config.get(
'eval_checkpoint_wait_secs', config.save_checkpoint_secs * 100)
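  # Poll the training directory and evaluate each new checkpoint as it appears.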
is_ema = True if ema else False
eval_summary_dir = os.path.join(
logdir, 'eval_{}_summaries_pyk_{}'.format(subset, is_ema))
writer = tf.summary.create_file_writer(eval_summary_dir)
while True:
ckpt_path = train_utils.wait_for_checkpoint(logdir, ckpt_path, wait_max)
logging.info(ckpt_path)
if ckpt_path is None:
logging.info('Timed out waiting for checkpoint.')
break
train_utils.with_strategy(
lambda: train_utils.restore(model, checkpoints, logdir, ema),
strategy)
data_iterator = iter(dataset)
num_steps = num_examples // batch_size
for metric_key, metric in metrics.items():
metric.reset_states()
logging.info('Starting evaluation.')
done = False
for i in range(0, num_steps, FLAGS.steps_per_summaries):
start_run = time.time()
for k in range(min(num_steps - i, FLAGS.steps_per_summaries)):
try:
if k % 10 == 0:
logging.info('Step: %d', (i + k + 1))
eval_step(data_iterator)
except (StopIteration, tf.errors.OutOfRangeError):
done = True
break
if done:
break
bits_per_dim = metrics['loss'].result()
logging.info('Bits/Dim: %.3f, Speed: %.3f seconds/step, Step: %d/%d',
bits_per_dim,
(time.time() - start_run) / FLAGS.steps_per_summaries,
i + k + 1, num_steps)
# logging.info('Final Bits/Dim: %.3f', bits_per_dim)
with writer.as_default():
for metric_key, metric in metrics.items():
curr_scalar = metric.result().numpy()
tf.summary.scalar(metric_key, curr_scalar, step=optimizer.iterations)
def main(_):
logging.info('Logging to %s.', FLAGS.logdir)
if FLAGS.mode == 'train':
logging.info('[main] I am the trainer.')
try:
train(FLAGS.logdir)
# During TPU Preemeption, the coordinator hangs with the error below.
# the exception forces the coordinator to fail, and it will be restarted.
except (tf.errors.UnavailableError, tf.errors.CancelledError):
os._exit(os.EX_TEMPFAIL) # pylint: disable=protected-access
elif FLAGS.mode.startswith('train'):
logging.info('[main] I am the trainer.')
train(os.path.join(FLAGS.logdir, FLAGS.mode))
elif FLAGS.mode == 'eval_train':
logging.info('[main] I am the training set evaluator.')
evaluate(FLAGS.logdir, subset='train')
elif FLAGS.mode == 'eval_valid':
logging.info('[main] I am the validation set evaluator.')
evaluate(FLAGS.logdir, subset='valid')
elif FLAGS.mode == 'eval_test':
logging.info('[main] I am the test set evaluator.')
evaluate(FLAGS.logdir, subset='test')
else:
raise ValueError(
'Unknown mode {}. '
'Must be one of [train, eval_train, eval_valid, eval_test]'.format(
FLAGS.mode))
if __name__ == '__main__':
app.run(main)
| [((47, 0, 48, 73), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', ({(47, 18, 47, 24): '"""mode"""', (47, 26, 47, 33): '"""train"""', (47, 35, 48, 53): "['train', 'eval_train', 'eval_valid', 'eval_test']", (48, 55, 48, 72): '"""Operation mode."""'}, {}), "('mode', 'train', ['train', 'eval_train', 'eval_valid',\n 'eval_test'], 'Operation mode.')", False, 'from absl import flags\n'), ((50, 0, 50, 69), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(50, 20, 50, 28): '"""logdir"""', (50, 30, 50, 40): '"""/tmp/svt"""', (50, 42, 50, 68): '"""Main directory for logs."""'}, {}), "('logdir', '/tmp/svt', 'Main directory for logs.')", False, 'from absl import flags\n'), ((51, 0, 52, 64), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(51, 20, 51, 28): '"""master"""', (51, 30, 51, 37): '"""local"""', (52, 20, 52, 63): '"""BNS name of the TensorFlow master to use."""'}, {}), "('master', 'local',\n 'BNS name of the TensorFlow master to use.')", False, 'from absl import flags\n'), ((53, 0, 54, 35), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', ({(53, 18, 53, 36): '"""accelerator_type"""', (53, 38, 53, 43): '"""GPU"""', (53, 45, 53, 66): "['CPU', 'GPU', 'TPU']", (54, 18, 54, 34): '"""Hardware type."""'}, {}), "('accelerator_type', 'GPU', ['CPU', 'GPU', 'TPU'],\n 'Hardware type.')", False, 'from absl import flags\n'), ((55, 0, 55, 75), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', ({(55, 18, 55, 27): '"""dataset"""', (55, 29, 55, 39): '"""imagenet"""', (55, 41, 55, 63): "['imagenet', 'custom']", (55, 65, 55, 74): '"""Dataset"""'}, {}), "('dataset', 'imagenet', ['imagenet', 'custom'], 'Dataset')", False, 'from absl import flags\n'), ((56, 0, 56, 74), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(56, 20, 56, 30): '"""data_dir"""', (56, 32, 56, 36): 'None', (56, 38, 56, 73): '"""Data directory for custom images."""'}, {}), "('data_dir', None, 'Data directory for custom images.')", False, 'from absl import flags\n'), ((57, 0, 57, 79), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(57, 20, 57, 37): '"""tpu_worker_name"""', (57, 39, 57, 51): '"""tpu_worker"""', (57, 53, 57, 78): '"""Name of the TPU worker."""'}, {}), "('tpu_worker_name', 'tpu_worker', 'Name of the TPU worker.')", False, 'from absl import flags\n'), ((58, 0, 59, 67), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(59, 4, 59, 18): '"""pretrain_dir"""', (59, 20, 59, 24): 'None', (59, 26, 59, 66): '"""Finetune from a pretrained checkpoint."""'}, {}), "('pretrain_dir', None,\n 'Finetune from a pretrained checkpoint.')", False, 'from absl import flags\n'), ((60, 0, 60, 74), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(60, 20, 60, 39): '"""summaries_log_dir"""', (60, 41, 60, 52): '"""summaries"""', (60, 54, 60, 73): '"""Summaries parent."""'}, {}), "('summaries_log_dir', 'summaries', 'Summaries parent.')", False, 'from absl import flags\n'), ((61, 0, 61, 72), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(61, 21, 61, 42): '"""steps_per_summaries"""', (61, 44, 61, 47): '(100)', (61, 49, 61, 71): '"""Steps per summaries."""'}, {}), "('steps_per_summaries', 100, 'Steps per summaries.')", False, 'from absl import flags\n'), ((62, 0, 62, 78), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(62, 21, 62, 41): '"""devices_per_worker"""', (62, 43, 62, 44): '(1)', (62, 46, 62, 77): '"""Number of devices per worker."""'}, {}), "('devices_per_worker', 1, 'Number of devices per worker.')", False, 'from absl import flags\n'), ((63, 0, 63, 57), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', ({(63, 21, 
63, 34): '"""num_workers"""', (63, 36, 63, 37): '(1)', (63, 39, 63, 56): '"""Number workers."""'}, {}), "('num_workers', 1, 'Number workers.')", False, 'from absl import flags\n'), ((64, 0, 67, 47), 'ml_collections.config_flags.DEFINE_config_file', 'config_flags.DEFINE_config_file', (), '', False, 'from ml_collections import config_flags\n'), ((81, 15, 81, 61), 'coltran.utils.train_utils.with_strategy', 'train_utils.with_strategy', ({(81, 41, 81, 50): 'ckpt_func', (81, 52, 81, 60): 'strategy'}, {}), '(ckpt_func, strategy)', False, 'from coltran.utils import train_utils\n'), ((101, 14, 101, 39), 'collections.OrderedDict', 'collections.OrderedDict', ({}, {}), '()', False, 'import collections\n'), ((111, 15, 113, 4), 'collections.OrderedDict', 'collections.OrderedDict', ({(111, 39, 113, 3): "[('scalar', loss_dict)]"}, {}), "([('scalar', loss_dict)])", False, 'import collections\n'), ((142, 9, 142, 58), 'coltran.utils.train_utils.step_with_strategy', 'train_utils.step_with_strategy', ({(142, 40, 142, 47): 'step_fn', (142, 49, 142, 57): 'strategy'}, {}), '(step_fn, strategy)', False, 'from coltran.utils import train_utils\n'), ((146, 14, 146, 49), 'coltran.utils.train_utils.build_optimizer', 'train_utils.build_optimizer', ({(146, 42, 146, 48): 'config'}, {}), '(config)', False, 'from coltran.utils import train_utils\n'), ((175, 8, 175, 47), 'coltran.utils.train_utils.build_ema', 'train_utils.build_ema', ({(175, 30, 175, 36): 'config', (175, 38, 175, 46): 'ema_vars'}, {}), '(config, ema_vars)', False, 'from coltran.utils import train_utils\n'), ((185, 2, 185, 42), 'coltran.utils.train_utils.write_config', 'train_utils.write_config', ({(185, 27, 185, 33): 'config', (185, 35, 185, 41): 'logdir'}, {}), '(config, logdir)', False, 'from coltran.utils import train_utils\n'), ((187, 25, 189, 67), 'coltran.utils.train_utils.setup_strategy', 'train_utils.setup_strategy', ({(188, 6, 188, 12): 'config', (188, 14, 188, 26): 'FLAGS.master', (189, 6, 189, 30): 'FLAGS.devices_per_worker', (189, 32, 189, 42): 'FLAGS.mode', (189, 44, 189, 66): 'FLAGS.accelerator_type'}, {}), '(config, FLAGS.master, FLAGS.devices_per_worker,\n FLAGS.mode, FLAGS.accelerator_type)', False, 'from coltran.utils import train_utils\n'), ((206, 2, 206, 35), 'absl.logging.info', 'logging.info', ({(206, 15, 206, 34): '"""Building dataset."""'}, {}), "('Building dataset.')", False, 'from absl import logging\n'), ((207, 18, 207, 71), 'coltran.utils.train_utils.dataset_with_strategy', 'train_utils.dataset_with_strategy', ({(207, 52, 207, 60): 'input_fn', (207, 62, 207, 70): 'strategy'}, {}), '(input_fn, strategy)', False, 'from coltran.utils import train_utils\n'), ((211, 2, 211, 33), 'absl.logging.info', 'logging.info', ({(211, 15, 211, 32): '"""Building model."""'}, {}), "('Building model.')", False, 'from absl import logging\n'), ((240, 15, 241, 76), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (), '', True, 'import tensorflow as tf\n'), ((246, 22, 246, 61), 'os.path.join', 'os.path.join', ({(246, 35, 246, 41): 'logdir', (246, 43, 246, 60): '"""train_summaries"""'}, {}), "(logdir, 'train_summaries')", False, 'import os\n'), ((247, 11, 247, 59), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', ({(247, 41, 247, 58): 'train_summary_dir'}, {}), '(train_summary_dir)', True, 'import tensorflow as tf\n'), ((248, 15, 248, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((250, 2, 250, 33), 'absl.logging.info', 'logging.info', ({(250, 15, 250, 32): '"""Start Training."""'}, 
{}), "('Start Training.')", False, 'from absl import logging\n'), ((298, 25, 300, 67), 'coltran.utils.train_utils.setup_strategy', 'train_utils.setup_strategy', ({(299, 6, 299, 12): 'config', (299, 14, 299, 26): 'FLAGS.master', (300, 6, 300, 30): 'FLAGS.devices_per_worker', (300, 32, 300, 42): 'FLAGS.mode', (300, 44, 300, 66): 'FLAGS.accelerator_type'}, {}), '(config, FLAGS.master, FLAGS.devices_per_worker,\n FLAGS.mode, FLAGS.accelerator_type)', False, 'from coltran.utils import train_utils\n'), ((323, 12, 323, 65), 'coltran.utils.train_utils.dataset_with_strategy', 'train_utils.dataset_with_strategy', ({(323, 46, 323, 54): 'input_fn', (323, 56, 323, 64): 'strategy'}, {}), '(input_fn, strategy)', False, 'from coltran.utils import train_utils\n'), ((334, 14, 334, 63), 'coltran.utils.train_utils.step_with_strategy', 'train_utils.step_with_strategy', ({(334, 45, 334, 52): 'step_fn', (334, 54, 334, 62): 'strategy'}, {}), '(step_fn, strategy)', False, 'from coltran.utils import train_utils\n'), ((342, 11, 342, 58), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', ({(342, 41, 342, 57): 'eval_summary_dir'}, {}), '(eval_summary_dir)', True, 'import tensorflow as tf\n'), ((388, 2, 388, 46), 'absl.logging.info', 'logging.info', ({(388, 15, 388, 31): '"""Logging to %s."""', (388, 33, 388, 45): 'FLAGS.logdir'}, {}), "('Logging to %s.', FLAGS.logdir)", False, 'from absl import logging\n'), ((417, 2, 417, 15), 'absl.app.run', 'app.run', ({(417, 10, 417, 14): 'main'}, {}), '(main)', False, 'from absl import app\n'), ((74, 16, 75, 61), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((77, 16, 79, 28), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((83, 4, 83, 72), 'absl.logging.info', 'logging.info', ({(83, 17, 83, 58): '"""Restoring from pretrained directory: %s"""', (83, 60, 83, 71): 'latest_ckpt'}, {}), "('Restoring from pretrained directory: %s', latest_ckpt)", False, 'from absl import logging\n'), ((156, 11, 156, 58), 'tensorflow.zeros', 'tf.zeros', (), '', True, 'import tensorflow as tf\n'), ((157, 12, 157, 47), 'coltran.models.colorizer.ColTranCore', 'colorizer.ColTranCore', ({(157, 34, 157, 46): 'config.model'}, {}), '(config.model)', False, 'from coltran.models import colorizer\n'), ((164, 17, 164, 64), 'tensorflow.zeros', 'tf.zeros', (), '', True, 'import tensorflow as tf\n'), ((165, 11, 165, 58), 'tensorflow.zeros', 'tf.zeros', (), '', True, 'import tensorflow as tf\n'), ((166, 12, 166, 50), 'coltran.models.upsampler.ColorUpsampler', 'upsampler.ColorUpsampler', ({(166, 37, 166, 49): 'config.model'}, {}), '(config.model)', False, 'from coltran.models import upsampler\n'), ((196, 14, 202, 32), 'coltran.datasets.get_dataset', 'datasets.get_dataset', (), '', False, 'from coltran import datasets\n'), ((221, 11, 221, 63), 'functools.partial', 'functools.partial', ({(221, 29, 221, 50): 'tf.keras.metrics.Mean', (221, 52, 221, 62): 'metric_key'}, {}), '(tf.keras.metrics.Mean, metric_key)', False, 'import functools\n'), ((222, 18, 222, 59), 'coltran.utils.train_utils.with_strategy', 'train_utils.with_strategy', ({(222, 44, 222, 48): 'func', (222, 50, 222, 58): 'strategy'}, {}), '(func, strategy)', False, 'from coltran.utils import train_utils\n'), ((227, 20, 227, 66), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', ({(227, 47, 227, 65): 'FLAGS.pretrain_dir'}, {}), '(FLAGS.pretrain_dir)', True, 'import tensorflow as tf\n'), ((236, 18, 236, 52), 'tensorflow.train.latest_checkpoint', 
'tf.train.latest_checkpoint', ({(236, 45, 236, 51): 'logdir'}, {}), '(logdir)', True, 'import tensorflow as tf\n'), ((244, 4, 244, 59), 'absl.logging.info', 'logging.info', ({(244, 17, 244, 41): '"""Saved checkpoint to %s"""', (244, 43, 244, 58): 'checkpoint_name'}, {}), "('Saved checkpoint to %s', checkpoint_name)", False, 'from absl import logging\n'), ((270, 16, 270, 27), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((303, 11, 307, 22), 'coltran.datasets.get_dataset', 'datasets.get_dataset', (), '', False, 'from coltran import datasets\n'), ((316, 11, 316, 63), 'functools.partial', 'functools.partial', ({(316, 29, 316, 50): 'tf.keras.metrics.Mean', (316, 52, 316, 62): 'metric_key'}, {}), '(tf.keras.metrics.Mean, metric_key)', False, 'import functools\n'), ((317, 18, 317, 59), 'coltran.utils.train_utils.with_strategy', 'train_utils.with_strategy', ({(317, 44, 317, 48): 'func', (317, 50, 317, 58): 'strategy'}, {}), '(func, strategy)', False, 'from coltran.utils import train_utils\n'), ((345, 16, 345, 76), 'coltran.utils.train_utils.wait_for_checkpoint', 'train_utils.wait_for_checkpoint', ({(345, 48, 345, 54): 'logdir', (345, 56, 345, 65): 'ckpt_path', (345, 67, 345, 75): 'wait_max'}, {}), '(logdir, ckpt_path, wait_max)', False, 'from coltran.utils import train_utils\n'), ((346, 4, 346, 27), 'absl.logging.info', 'logging.info', ({(346, 17, 346, 26): 'ckpt_path'}, {}), '(ckpt_path)', False, 'from absl import logging\n'), ((360, 4, 360, 40), 'absl.logging.info', 'logging.info', ({(360, 17, 360, 39): '"""Starting evaluation."""'}, {}), "('Starting evaluation.')", False, 'from absl import logging\n'), ((390, 4, 390, 44), 'absl.logging.info', 'logging.info', ({(390, 17, 390, 43): '"""[main] I am the trainer."""'}, {}), "('[main] I am the trainer.')", False, 'from absl import logging\n'), ((127, 9, 127, 26), 'tensorflow.GradientTape', 'tf.GradientTape', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((169, 17, 169, 64), 'tensorflow.zeros', 'tf.zeros', (), '', True, 'import tensorflow as tf\n'), ((170, 11, 170, 58), 'tensorflow.zeros', 'tf.zeros', (), '', True, 'import tensorflow as tf\n'), ((171, 12, 171, 52), 'coltran.models.upsampler.SpatialUpsampler', 'upsampler.SpatialUpsampler', ({(171, 39, 171, 51): 'config.model'}, {}), '(config.model)', False, 'from coltran.models import upsampler\n'), ((194, 20, 194, 64), 'tensorflow_datasets.ReadConfig', 'tfds.ReadConfig', (), '', True, 'import tensorflow_datasets as tfds\n'), ((272, 40, 272, 77), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ({(272, 61, 272, 76): 'steps_per_write'}, {}), '(steps_per_write)', True, 'import tensorflow as tf\n'), ((286, 6, 286, 61), 'absl.logging.info', 'logging.info', ({(286, 19, 286, 43): '"""Saved checkpoint to %s"""', (286, 45, 286, 60): 'checkpoint_name'}, {}), "('Saved checkpoint to %s', checkpoint_name)", False, 'from absl import logging\n'), ((287, 19, 287, 30), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((321, 14, 321, 66), 'coltran.utils.train_utils.create_checkpoint', 'train_utils.create_checkpoint', ({(321, 44, 321, 49): 'model', (321, 51, 321, 60): 'optimizer', (321, 62, 321, 65): 'ema'}, {}), '(model, optimizer, ema)', False, 'from coltran.utils import train_utils\n'), ((348, 6, 348, 55), 'absl.logging.info', 'logging.info', ({(348, 19, 348, 54): '"""Timed out waiting for checkpoint."""'}, {}), "('Timed out waiting for checkpoint.')", False, 'from absl import logging\n'), ((363, 18, 363, 29), 'time.time', 'time.time', ({}, {}), '()', False, 
'import time\n'), ((398, 4, 398, 44), 'absl.logging.info', 'logging.info', ({(398, 17, 398, 43): '"""[main] I am the trainer."""'}, {}), "('[main] I am the trainer.')", False, 'from absl import logging\n'), ((274, 39, 274, 50), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((278, 8, 278, 70), 'tensorflow.summary.scalar', 'tf.summary.scalar', (), '', True, 'import tensorflow as tf\n'), ((284, 7, 284, 18), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((352, 16, 352, 68), 'coltran.utils.train_utils.restore', 'train_utils.restore', ({(352, 36, 352, 41): 'model', (352, 43, 352, 54): 'checkpoints', (352, 56, 352, 62): 'logdir', (352, 64, 352, 67): 'ema'}, {}), '(model, checkpoints, logdir, ema)', False, 'from coltran.utils import train_utils\n'), ((384, 8, 384, 77), 'tensorflow.summary.scalar', 'tf.summary.scalar', (), '', True, 'import tensorflow as tf\n'), ((396, 6, 396, 30), 'os._exit', 'os._exit', ({(396, 15, 396, 29): 'os.EX_TEMPFAIL'}, {}), '(os.EX_TEMPFAIL)', False, 'import os\n'), ((399, 10, 399, 48), 'os.path.join', 'os.path.join', ({(399, 23, 399, 35): 'FLAGS.logdir', (399, 37, 399, 47): 'FLAGS.mode'}, {}), '(FLAGS.logdir, FLAGS.mode)', False, 'import os\n'), ((401, 4, 401, 59), 'absl.logging.info', 'logging.info', ({(401, 17, 401, 58): '"""[main] I am the training set evaluator."""'}, {}), "('[main] I am the training set evaluator.')", False, 'from absl import logging\n'), ((281, 10, 282, 48), 'absl.logging.info', 'logging.info', ({(281, 23, 281, 70): '"""Loss: %.3f bits/dim, Speed: %.3f steps/second"""', (282, 23, 282, 32): 'metric_np', (282, 34, 282, 47): 'steps_per_sec'}, {}), "('Loss: %.3f bits/dim, Speed: %.3f steps/second', metric_np,\n steps_per_sec)", False, 'from absl import logging\n'), ((404, 4, 404, 61), 'absl.logging.info', 'logging.info', ({(404, 17, 404, 60): '"""[main] I am the validation set evaluator."""'}, {}), "('[main] I am the validation set evaluator.')", False, 'from absl import logging\n'), ((367, 12, 367, 49), 'absl.logging.info', 'logging.info', ({(367, 25, 367, 35): '"""Step: %d"""', (367, 38, 367, 47): '(i + k + 1)'}, {}), "('Step: %d', i + k + 1)", False, 'from absl import logging\n'), ((377, 20, 377, 31), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((407, 4, 407, 55), 'absl.logging.info', 'logging.info', ({(407, 17, 407, 54): '"""[main] I am the test set evaluator."""'}, {}), "('[main] I am the test set evaluator.')", False, 'from absl import logging\n')] |
wenliangdai/sunets-reproduce | train_multi_human.py | d92efa80e8314aea153d498cce3c9c6e30c252bd | import argparse
import math
import os
import pickle
import random
import sys
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch import nn
from torch.optim import lr_scheduler
from torch.utils import data
import torchvision.transforms as transforms
import transforms as extended_transforms
from loss import prediction_stat
from main import get_data_path
from main.loader import get_loader
from main.models import get_model
from utils import dotdict, float2str
# paths
ROOT = '/home/wenlidai/sunets-reproduce/'
RESULT = 'results'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def main(args):
print('='*10, 'Starting', '='*10, '\n')
print(device)
# Set the seed for reproducing the results
random.seed(args.manual_seed)
np.random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.manual_seed)
cudnn.benchmark = True
# Set up results folder
if not os.path.exists(os.path.join(ROOT, RESULT, 'saved_val_images')):
os.makedirs(os.path.join(ROOT, RESULT, 'saved_val_images'))
if not os.path.exists(os.path.join(ROOT, RESULT, 'saved_train_images')):
os.makedirs(os.path.join(ROOT, RESULT, 'saved_train_images'))
# Setup Dataloader
data_loader = get_loader(args.dataset)
input_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = extended_transforms.MaskToTensor()
traindata = data_loader('train', n_classes=args.n_classes, transform=input_transform, target_transform=target_transform, do_transform=True)
trainloader = data.DataLoader(traindata, batch_size=args.batch_size, num_workers=2, shuffle=True)
valdata = data_loader('val', n_classes=args.n_classes, transform=input_transform, target_transform=target_transform)
valloader = data.DataLoader(valdata, batch_size=args.batch_size, num_workers=2, shuffle=False)
n_classes = traindata.n_classes
n_trainsamples = len(traindata)
n_iters_per_epoch = np.ceil(n_trainsamples / float(args.batch_size * args.iter_size))
# Setup Model
model = get_model(
name=args.arch,
n_classes=n_classes,
ignore_index=traindata.ignore_index,
output_stride=args.output_stride,
pretrained=args.pretrained,
momentum_bn=args.momentum_bn,
dprob=args.dprob
).to(device)
epochs_done=0
X=[]
Y1=[]
Y1_test=[]
Y2=[]
Y2_test=[]
avg_pixel_acc = 0
mean_class_acc = 0
mIoU = 0
avg_pixel_acc_test = 0
mean_class_acc_test = 0
mIoU_test = 0
best_mIoU = 0
best_epoch = 0
if args.model_path:
model_name = args.model_path.split('.')
checkpoint_name = model_name[0] + '_optimizer.pkl'
checkpoint = torch.load(os.path.join(ROOT, RESULT, checkpoint_name))
optm = checkpoint['optimizer']
model.load_state_dict(checkpoint['state_dict'])
split_str = model_name[0].split('_')
epochs_done = int(split_str[-1])
saved_loss = pickle.load( open(os.path.join(ROOT, RESULT, "saved_loss.p"), "rb") )
saved_accuracy = pickle.load( open(os.path.join(ROOT, RESULT, "saved_accuracy.p"), "rb") )
X=saved_loss["X"][:epochs_done]
Y=saved_loss["Y"][:epochs_done]
Y_test=saved_loss["Y_test"][:epochs_done]
avg_pixel_acc = saved_accuracy["P"][:epochs_done,:]
mean_class_acc = saved_accuracy["M"][:epochs_done,:]
mIoU = saved_accuracy["I"][:epochs_done,:]
avg_pixel_acc_test = saved_accuracy["P_test"][:epochs_done,:]
mean_class_acc_test = saved_accuracy["M_test"][:epochs_done,:]
mIoU_test = saved_accuracy["I_test"][:epochs_done,:]
if args.best_model_path:
best_model_name = args.best_model_path.split('_')
best_mIoU = float(best_model_name[-2])
best_epoch = int(best_model_name[-3])
# Learning rates: For new layers (such as final layer), we set lr to be 10x the learning rate of layers already trained
bias_10x_params = filter(lambda x: ('bias' in x[0]) and ('final' in x[0]) and ('conv' in x[0]),
model.named_parameters())
bias_10x_params = list(map(lambda x: x[1], bias_10x_params))
bias_params = filter(lambda x: ('bias' in x[0]) and ('final' not in x[0]),
model.named_parameters())
bias_params = list(map(lambda x: x[1], bias_params))
nonbias_10x_params = filter(lambda x: (('bias' not in x[0]) or ('bn' in x[0])) and ('final' in x[0]),
model.named_parameters())
nonbias_10x_params = list(map(lambda x: x[1], nonbias_10x_params))
nonbias_params = filter(lambda x: ('bias' not in x[0]) and ('final' not in x[0]),
model.named_parameters())
nonbias_params = list(map(lambda x: x[1], nonbias_params))
optimizer = torch.optim.SGD([{'params': bias_params, 'lr': args.lr},
{'params': bias_10x_params, 'lr': 20 * args.lr if args.pretrained else args.lr},
{'params': nonbias_10x_params, 'lr': 10 * args.lr if args.pretrained else args.lr},
{'params': nonbias_params, 'lr': args.lr},],
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay,
nesterov=(args.optim == 'Nesterov'))
num_param_groups = 4
# optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# Setting up scheduler
if args.model_path and args.restore:
# Here we restore all states of optimizer
optimizer.load_state_dict(optm)
total_iters = n_iters_per_epoch * args.epochs
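        # Cosine-annealed LR schedule: each param group decays from its base LR toward 0 over all iterations; last_epoch resumes the schedule at the restored step.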
lambda1 = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters)
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda1]*num_param_groups, last_epoch=epochs_done*n_iters_per_epoch)
# scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1, last_epoch=epochs_done)
else:
# scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
# Here we simply restart the training
# if args.T0:
# total_iters = args.T0 * n_iters_per_epoch
# else:
total_iters = ((args.epochs - epochs_done) * n_iters_per_epoch)
lambda1 = lambda step: 0.5 + 0.5 * math.cos(np.pi * step / total_iters)
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda1]*num_param_groups)
global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
global steps, steps_test
criterion_sbd = nn.CrossEntropyLoss(size_average=False, ignore_index=traindata.ignore_index)
criterion_lip = nn.CrossEntropyLoss(size_average=False, ignore_index=traindata.ignore_index)
criterions = [criterion_sbd, criterion_lip]
for epoch in range(epochs_done, args.epochs):
print('='*10, 'Epoch %d' % (epoch + 1), '='*10)
l_avg = [0, 0]
totalclasswise_pixel_acc = [0, 0]
totalclasswise_gtpixels = [0, 0]
totalclasswise_predpixels = [0, 0]
l_avg_test = [0, 0]
totalclasswise_pixel_acc_test = [0, 0]
totalclasswise_gtpixels_test = [0, 0]
totalclasswise_predpixels_test = [0, 0]
steps = [0, 0]
steps_test = [0, 0]
# scheduler.step()
train(model, optimizer, criterions, trainloader, epoch, scheduler, traindata)
val(model, criterions, valloader, epoch, valdata)
# save the model every 5 epochs
if (epoch + 1) % 5 == 0 or epoch == args.epochs - 1:
if (epoch + 1) > 5:
os.remove(os.path.join(ROOT, RESULT, "{}_{}_{}.pkl".format(args.arch, args.dataset, epoch - 4)))
os.remove(os.path.join(ROOT, RESULT, "{}_{}_{}_optimizer.pkl".format(args.arch, args.dataset, epoch - 4)))
torch.save(model, os.path.join(ROOT, RESULT, "{}_{}_{}.pkl".format(args.arch, args.dataset, epoch + 1)))
torch.save({'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()},
os.path.join(ROOT, RESULT, "{}_{}_{}_optimizer.pkl".format(args.arch, args.dataset, epoch + 1)))
# remove old loss & accuracy files
if os.path.isfile(os.path.join(ROOT, RESULT, "saved_loss.p")):
os.remove(os.path.join(ROOT, RESULT, "saved_loss.p"))
if os.path.isfile(os.path.join(ROOT, RESULT, "saved_accuracy.p")):
os.remove(os.path.join(ROOT, RESULT, "saved_accuracy.p"))
# save train and validation loss
X.append(epoch + 1)
Y1.append(l_avg[0] / steps[0])
Y1_test.append(l_avg_test[0] / steps_test[0])
Y2.append(l_avg[1] / steps[1])
Y2_test.append(l_avg_test[1] / steps_test[1])
saved_loss={"X": X, "Y1": Y1, "Y2": Y2, "Y1_test": Y1_test, "Y2_test": Y2_test}
pickle.dump(saved_loss, open(os.path.join(ROOT, RESULT, "saved_loss.p"), "wb"))
# pixel accuracy
totalclasswise_pixel_acc[0] = totalclasswise_pixel_acc[0].reshape((-1, n_classes[0])).astype(np.float32)
totalclasswise_gtpixels[0] = totalclasswise_gtpixels[0].reshape((-1, n_classes[0]))
totalclasswise_predpixels[0] = totalclasswise_predpixels[0].reshape((-1, n_classes[0]))
totalclasswise_pixel_acc_test[0] = totalclasswise_pixel_acc_test[0].reshape((-1, n_classes[0])).astype(np.float32)
totalclasswise_gtpixels_test[0] = totalclasswise_gtpixels_test[0].reshape((-1, n_classes[0]))
totalclasswise_predpixels_test[0] = totalclasswise_predpixels_test[0].reshape((-1, n_classes[0]))
totalclasswise_pixel_acc[1] = totalclasswise_pixel_acc[1].reshape((-1, n_classes[1])).astype(np.float32)
totalclasswise_gtpixels[1] = totalclasswise_gtpixels[1].reshape((-1, n_classes[1]))
totalclasswise_predpixels[1] = totalclasswise_predpixels[1].reshape((-1, n_classes[1]))
totalclasswise_pixel_acc_test[1] = totalclasswise_pixel_acc_test[1].reshape((-1, n_classes[1])).astype(np.float32)
totalclasswise_gtpixels_test[1] = totalclasswise_gtpixels_test[1].reshape((-1, n_classes[1]))
totalclasswise_predpixels_test[1] = totalclasswise_predpixels_test[1].reshape((-1, n_classes[1]))
if isinstance(avg_pixel_acc, list):
avg_pixel_acc[0] = np.vstack((avg_pixel_acc[0], np.sum(totalclasswise_pixel_acc[0], axis=1) / np.sum(totalclasswise_gtpixels[0], axis=1)))
mean_class_acc[0] = np.vstack((mean_class_acc[0], np.mean(totalclasswise_pixel_acc[0] / totalclasswise_gtpixels[0], axis=1)))
mIoU[0] = np.vstack((mIoU[0], np.mean(totalclasswise_pixel_acc[0] / (totalclasswise_gtpixels[0] + totalclasswise_predpixels[0] - totalclasswise_pixel_acc[0]), axis=1)))
avg_pixel_acc[1] = np.vstack((avg_pixel_acc[1], np.sum(totalclasswise_pixel_acc[1], axis=1) / np.sum(totalclasswise_gtpixels[1], axis=1)))
mean_class_acc[1] = np.vstack((mean_class_acc[1], np.mean(totalclasswise_pixel_acc[1] / totalclasswise_gtpixels[1], axis=1)))
mIoU[1] = np.vstack((mIoU[1], np.mean(totalclasswise_pixel_acc[1] / (totalclasswise_gtpixels[1] + totalclasswise_predpixels[1] - totalclasswise_pixel_acc[1]), axis=1)))
avg_pixel_acc_test[0] = np.vstack((avg_pixel_acc_test[0], np.sum(totalclasswise_pixel_acc_test[0],axis=1) / np.sum(totalclasswise_gtpixels_test[0], axis=1)))
mean_class_acc_test[0] = np.vstack((mean_class_acc_test[0], np.mean(totalclasswise_pixel_acc_test[0] / totalclasswise_gtpixels_test[0], axis=1)))
mIoU_test[0] = np.vstack((mIoU_test[0], np.mean(totalclasswise_pixel_acc_test[0] / (totalclasswise_gtpixels_test[0] + totalclasswise_predpixels_test[0] - totalclasswise_pixel_acc_test[0]), axis=1)))
avg_pixel_acc_test[1] = np.vstack((avg_pixel_acc_test[1], np.sum(totalclasswise_pixel_acc_test[1],axis=1) / np.sum(totalclasswise_gtpixels_test[1], axis=1)))
mean_class_acc_test[1] = np.vstack((mean_class_acc_test[1], np.mean(totalclasswise_pixel_acc_test[1] / totalclasswise_gtpixels_test[1], axis=1)))
mIoU_test[1] = np.vstack((mIoU_test[1], np.mean(totalclasswise_pixel_acc_test[1] / (totalclasswise_gtpixels_test[1] + totalclasswise_predpixels_test[1] - totalclasswise_pixel_acc_test[1]), axis=1)))
else:
avg_pixel_acc = []
mean_class_acc = []
mIoU = []
avg_pixel_acc.append( np.sum(totalclasswise_pixel_acc[0], axis=1) / np.sum(totalclasswise_gtpixels[0], axis=1) )
mean_class_acc.append( np.mean(totalclasswise_pixel_acc[0] / totalclasswise_gtpixels[0], axis=1) )
mIoU.append( np.mean(totalclasswise_pixel_acc[0] / (totalclasswise_gtpixels[0] + totalclasswise_predpixels[0] - totalclasswise_pixel_acc[0]), axis=1) )
avg_pixel_acc.append( np.sum(totalclasswise_pixel_acc[1], axis=1) / np.sum(totalclasswise_gtpixels[1], axis=1) )
mean_class_acc.append( np.mean(totalclasswise_pixel_acc[1] / totalclasswise_gtpixels[1], axis=1) )
mIoU.append( np.mean(totalclasswise_pixel_acc[1] / (totalclasswise_gtpixels[1] + totalclasswise_predpixels[1] - totalclasswise_pixel_acc[1]), axis=1) )
avg_pixel_acc_test = []
mean_class_acc_test = []
mIoU_test = []
avg_pixel_acc_test.append( np.sum(totalclasswise_pixel_acc_test[0], axis=1) / np.sum(totalclasswise_gtpixels_test[0], axis=1) )
mean_class_acc_test.append( np.mean(totalclasswise_pixel_acc_test[0] / totalclasswise_gtpixels_test[0], axis=1) )
mIoU_test.append( np.mean(totalclasswise_pixel_acc_test[0] / (totalclasswise_gtpixels_test[0] + totalclasswise_predpixels_test[0] - totalclasswise_pixel_acc_test[0]), axis=1) )
avg_pixel_acc_test.append( np.sum(totalclasswise_pixel_acc_test[1], axis=1) / np.sum(totalclasswise_gtpixels_test[1], axis=1) )
mean_class_acc_test.append( np.mean(totalclasswise_pixel_acc_test[1] / totalclasswise_gtpixels_test[1], axis=1) )
mIoU_test.append( np.mean(totalclasswise_pixel_acc_test[1] / (totalclasswise_gtpixels_test[1] + totalclasswise_predpixels_test[1] - totalclasswise_pixel_acc_test[1]), axis=1) )
saved_accuracy = {
"X": X,
"P1": avg_pixel_acc[0], "P2": avg_pixel_acc[1],
"M1": mean_class_acc[0], "M2": mean_class_acc[1],
"I1": mIoU[0], "I2": mIoU[1],
"P1_test": avg_pixel_acc_test[0], "P2_test": avg_pixel_acc_test[1],
"M1_test": mean_class_acc_test[0], "M2_test": mean_class_acc_test[1],
"I1_test": mIoU_test[0], "I2_test": mIoU_test[1]
}
pickle.dump(saved_accuracy, open(os.path.join(ROOT, RESULT, "saved_accuracy.p"), "wb"))
# print validation mIoU of both tasks
this_mIoU1 = np.mean(totalclasswise_pixel_acc_test[0] / (totalclasswise_gtpixels_test[0] + totalclasswise_predpixels_test[0] - totalclasswise_pixel_acc_test[0]), axis=1)[0]
this_mIoU2 = np.mean(totalclasswise_pixel_acc_test[1] / (totalclasswise_gtpixels_test[1] + totalclasswise_predpixels_test[1] - totalclasswise_pixel_acc_test[1]), axis=1)[0]
print('Val: mIoU_sbd = {}, mIoU_lip = {}'.format(this_mIoU1, this_mIoU2))
def train(model, optimizer, criterions, trainloader, epoch, scheduler, data):
global l_avg, totalclasswise_pixel_acc, totalclasswise_gtpixels, totalclasswise_predpixels
global steps
model.train()
for i, (images, sbd_labels, lip_labels) in enumerate(trainloader):
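        # Count pixels that are not ignore_index so each task's loss can be normalized per valid pixel.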
sbd_valid_pixel = float( (sbd_labels.data != criterions[0].ignore_index).long().sum() )
lip_valid_pixel = float( (lip_labels.data != criterions[1].ignore_index).long().sum() )
images = images.to(device)
sbd_labels = sbd_labels.to(device)
lip_labels = lip_labels.to(device)
sbd_outputs, lip_outputs = model(images, task=2)
sbd_loss = criterions[0](sbd_outputs, sbd_labels)
classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([sbd_outputs], sbd_labels, data.n_classes[0])
classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
classwise_predpixels = torch.FloatTensor([classwise_predpixels])
totalclasswise_pixel_acc[0] += classwise_pixel_acc.sum(0).data.numpy()
totalclasswise_gtpixels[0] += classwise_gtpixels.sum(0).data.numpy()
totalclasswise_predpixels[0] += classwise_predpixels.sum(0).data.numpy()
sbd_total_loss = sbd_loss.sum()
sbd_total_loss = sbd_total_loss / float(sbd_valid_pixel)
sbd_total_loss.backward(retain_graph=True)
lip_loss = criterions[1](lip_outputs, lip_labels)
classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([lip_outputs], lip_labels, data.n_classes[1])
classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
classwise_predpixels = torch.FloatTensor([classwise_predpixels])
totalclasswise_pixel_acc[1] += classwise_pixel_acc.sum(0).data.numpy()
totalclasswise_gtpixels[1] += classwise_gtpixels.sum(0).data.numpy()
totalclasswise_predpixels[1] += classwise_predpixels.sum(0).data.numpy()
lip_total_loss = lip_loss.sum()
lip_total_loss = lip_total_loss / float(lip_valid_pixel)
lip_total_loss.backward()
l_avg[0] += sbd_loss.sum().data.cpu().numpy()
steps[0] += sbd_valid_pixel
l_avg[1] += lip_loss.sum().data.cpu().numpy()
steps[1] += lip_valid_pixel
optimizer.step()
optimizer.zero_grad()
scheduler.step()
# if (i + 1) % args.log_size == 0:
# pickle.dump(images[0].cpu().numpy(),
# open(os.path.join(ROOT, RESULT, "saved_train_images/" + str(epoch) + "_" + str(i) + "_input.p"), "wb"))
# pickle.dump(np.transpose(data.decode_segmap(outputs[0].data.cpu().numpy().argmax(0)), [2, 0, 1]),
# open(os.path.join(ROOT, RESULT, "saved_train_images/" + str(epoch) + "_" + str(i) + "_output.p"), "wb"))
# pickle.dump(np.transpose(data.decode_segmap(labels[0].cpu().numpy()), [2, 0, 1]),
# open(os.path.join(ROOT, RESULT, "saved_train_images/" + str(epoch) + "_" + str(i) + "_target.p"), "wb"))
def val(model, criterions, valloader, epoch, data):
global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
global steps_test
model.eval()
for i, (images, sbd_labels, lip_labels) in enumerate(valloader):
sbd_valid_pixel = float( (sbd_labels.data != criterions[0].ignore_index).long().sum() )
lip_valid_pixel = float( (lip_labels.data != criterions[1].ignore_index).long().sum() )
images = images.to(device)
sbd_labels = sbd_labels.to(device)
lip_labels = lip_labels.to(device)
with torch.no_grad():
sbd_outputs, lip_outputs = model(images, task=2)
sbd_loss = criterions[0](sbd_outputs, sbd_labels)
lip_loss = criterions[1](lip_outputs, lip_labels)
classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([sbd_outputs], sbd_labels, data.n_classes[0])
classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
classwise_predpixels = torch.FloatTensor([classwise_predpixels])
totalclasswise_pixel_acc_test[0] += classwise_pixel_acc.sum(0).data.numpy()
totalclasswise_gtpixels_test[0] += classwise_gtpixels.sum(0).data.numpy()
totalclasswise_predpixels_test[0] += classwise_predpixels.sum(0).data.numpy()
classwise_pixel_acc, classwise_gtpixels, classwise_predpixels = prediction_stat([lip_outputs], lip_labels, data.n_classes[1])
classwise_pixel_acc = torch.FloatTensor([classwise_pixel_acc])
classwise_gtpixels = torch.FloatTensor([classwise_gtpixels])
classwise_predpixels = torch.FloatTensor([classwise_predpixels])
totalclasswise_pixel_acc_test[1] += classwise_pixel_acc.sum(0).data.numpy()
totalclasswise_gtpixels_test[1] += classwise_gtpixels.sum(0).data.numpy()
totalclasswise_predpixels_test[1] += classwise_predpixels.sum(0).data.numpy()
l_avg_test[0] += sbd_loss.sum().data.cpu().numpy()
steps_test[0] += sbd_valid_pixel
l_avg_test[1] += lip_loss.sum().data.cpu().numpy()
steps_test[1] += lip_valid_pixel
# if (i + 1) % 800 == 0:
# pickle.dump(images[0].cpu().numpy(),
# open(os.path.join(ROOT, RESULT, "saved_val_images/" + str(epoch) + "_" + str(i) + "_input.p"), "wb"))
# pickle.dump(np.transpose(data.decode_segmap(outputs[0].data.cpu().numpy().argmax(0)), [2, 0, 1]),
# open(os.path.join(ROOT, RESULT, "saved_val_images/" + str(epoch) + "_" + str(i) + "_output.p"), "wb"))
# pickle.dump(np.transpose(data.decode_segmap(labels[0].cpu().numpy()), [2, 0, 1]),
# open(os.path.join(ROOT, RESULT, "saved_val_images/" + str(epoch) + "_" + str(i) + "_target.p"), "wb"))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--arch', nargs='?', type=str, default='sunet64_multi',
help='Architecture to use [\'sunet64, sunet128, sunet7128 etc\']')
parser.add_argument('--model_path', help='Path to the saved model', type=str)
parser.add_argument('--best_model_path', help='Path to the saved best model', type=str)
parser.add_argument('--dataset', nargs='?', type=str, default='human',
help='Dataset to use [\'sbd, coco, cityscapes etc\']')
parser.add_argument('--img_rows', nargs='?', type=int, default=512,
help='Height of the input image')
parser.add_argument('--img_cols', nargs='?', type=int, default=512,
help='Width of the input image')
parser.add_argument('--epochs', nargs='?', type=int, default=90,
help='# of the epochs')
parser.add_argument('--batch_size', nargs='?', type=int, default=10,
help='Batch Size')
parser.add_argument('--lr', nargs='?', type=float, default=0.0005,
help='Learning Rate')
parser.add_argument('--manual_seed', default=0, type=int,
help='manual seed')
parser.add_argument('--iter_size', type=int, default=1,
help='number of batches per weight updates')
parser.add_argument('--log_size', type=int, default=400,
help='iteration period of logging segmented images')
parser.add_argument('--dprob', nargs='?', type=float, default=1e-7,
help='Dropout probability')
parser.add_argument('--momentum', nargs='?', type=float, default=0.95,
help='Momentum for SGD')
parser.add_argument('--momentum_bn', nargs='?', type=float, default=0.01,
help='Momentum for BN')
parser.add_argument('--weight_decay', nargs='?', type=float, default=1e-4,
help='Weight decay')
parser.add_argument('--output_stride', nargs='?', type=str, default='16',
help='Output stride to use [\'32, 16, 8 etc\']')
parser.add_argument('--freeze', action='store_true',
help='Freeze BN params')
parser.add_argument('--restore', action='store_true',
help='Restore Optimizer params')
    parser.add_argument('--epoch_log_size', nargs='?', type=int, default=20,
help='Every [epoch_log_size] iterations to print loss in each epoch')
parser.add_argument('--pretrained', action='store_true',
help='Use pretrained ImageNet initialization or not')
parser.add_argument('--n_classes', nargs='?', type=int, action='append',
help='number of classes of the labels')
parser.add_argument('--optim', nargs='?', type=str, default='SGD',
help='Optimizer to use [\'SGD, Nesterov etc\']')
global args
args = parser.parse_args()
RESULT = '{}_{}_{}'.format(RESULT, args.arch, args.dataset)
if args.pretrained:
RESULT = RESULT + '_pretrained'
main(args)
| [((34, 4, 34, 33), 'random.seed', 'random.seed', ({(34, 16, 34, 32): 'args.manual_seed'}, {}), '(args.manual_seed)', False, 'import random\n'), ((35, 4, 35, 36), 'numpy.random.seed', 'np.random.seed', ({(35, 19, 35, 35): 'args.manual_seed'}, {}), '(args.manual_seed)', True, 'import numpy as np\n'), ((36, 4, 36, 39), 'torch.manual_seed', 'torch.manual_seed', ({(36, 22, 36, 38): 'args.manual_seed'}, {}), '(args.manual_seed)', False, 'import torch\n'), ((37, 7, 37, 32), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((48, 18, 48, 42), 'main.loader.get_loader', 'get_loader', ({(48, 29, 48, 41): 'args.dataset'}, {}), '(args.dataset)', False, 'from main.loader import get_loader\n'), ((54, 23, 54, 57), 'transforms.MaskToTensor', 'extended_transforms.MaskToTensor', ({}, {}), '()', True, 'import transforms as extended_transforms\n'), ((57, 18, 57, 101), 'torch.utils.data.DataLoader', 'data.DataLoader', (), '', False, 'from torch.utils import data\n'), ((59, 16, 59, 98), 'torch.utils.data.DataLoader', 'data.DataLoader', (), '', False, 'from torch.utils import data\n'), ((133, 16, 138, 68), 'torch.optim.SGD', 'torch.optim.SGD', (), '', False, 'import torch\n'), ((165, 20, 165, 96), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', (), '', False, 'from torch import nn\n'), ((166, 20, 166, 96), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', (), '', False, 'from torch import nn\n'), ((395, 13, 395, 63), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((27, 34, 27, 59), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((38, 8, 38, 52), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', ({(38, 35, 38, 51): 'args.manual_seed'}, {}), '(args.manual_seed)', False, 'import torch\n'), ((149, 20, 149, 132), 'torch.optim.lr_scheduler.LambdaLR', 'lr_scheduler.LambdaLR', (), '', False, 'from torch.optim import lr_scheduler\n'), ((159, 20, 159, 90), 'torch.optim.lr_scheduler.LambdaLR', 'lr_scheduler.LambdaLR', (), '', False, 'from torch.optim import lr_scheduler\n'), ((293, 72, 293, 133), 'loss.prediction_stat', 'prediction_stat', ({(293, 88, 293, 101): '[sbd_outputs]', (293, 103, 293, 113): 'sbd_labels', (293, 115, 293, 132): 'data.n_classes[0]'}, {}), '([sbd_outputs], sbd_labels, data.n_classes[0])', False, 'from loss import prediction_stat\n'), ((294, 30, 294, 70), 'torch.FloatTensor', 'torch.FloatTensor', ({(294, 48, 294, 69): '[classwise_pixel_acc]'}, {}), '([classwise_pixel_acc])', False, 'import torch\n'), ((295, 29, 295, 68), 'torch.FloatTensor', 'torch.FloatTensor', ({(295, 47, 295, 67): '[classwise_gtpixels]'}, {}), '([classwise_gtpixels])', False, 'import torch\n'), ((296, 31, 296, 72), 'torch.FloatTensor', 'torch.FloatTensor', ({(296, 49, 296, 71): '[classwise_predpixels]'}, {}), '([classwise_predpixels])', False, 'import torch\n'), ((308, 72, 308, 133), 'loss.prediction_stat', 'prediction_stat', ({(308, 88, 308, 101): '[lip_outputs]', (308, 103, 308, 113): 'lip_labels', (308, 115, 308, 132): 'data.n_classes[1]'}, {}), '([lip_outputs], lip_labels, data.n_classes[1])', False, 'from loss import prediction_stat\n'), ((309, 30, 309, 70), 'torch.FloatTensor', 'torch.FloatTensor', ({(309, 48, 309, 69): '[classwise_pixel_acc]'}, {}), '([classwise_pixel_acc])', False, 'import torch\n'), ((310, 29, 310, 68), 'torch.FloatTensor', 'torch.FloatTensor', ({(310, 47, 310, 67): '[classwise_gtpixels]'}, {}), '([classwise_gtpixels])', False, 'import torch\n'), ((311, 
31, 311, 72), 'torch.FloatTensor', 'torch.FloatTensor', ({(311, 49, 311, 71): '[classwise_predpixels]'}, {}), '([classwise_predpixels])', False, 'import torch\n'), ((42, 26, 42, 72), 'os.path.join', 'os.path.join', ({(42, 39, 42, 43): 'ROOT', (42, 45, 42, 51): 'RESULT', (42, 53, 42, 71): '"""saved_val_images"""'}, {}), "(ROOT, RESULT, 'saved_val_images')", False, 'import os\n'), ((43, 20, 43, 66), 'os.path.join', 'os.path.join', ({(43, 33, 43, 37): 'ROOT', (43, 39, 43, 45): 'RESULT', (43, 47, 43, 65): '"""saved_val_images"""'}, {}), "(ROOT, RESULT, 'saved_val_images')", False, 'import os\n'), ((44, 26, 44, 74), 'os.path.join', 'os.path.join', ({(44, 39, 44, 43): 'ROOT', (44, 45, 44, 51): 'RESULT', (44, 53, 44, 73): '"""saved_train_images"""'}, {}), "(ROOT, RESULT, 'saved_train_images')", False, 'import os\n'), ((45, 20, 45, 68), 'os.path.join', 'os.path.join', ({(45, 33, 45, 37): 'ROOT', (45, 39, 45, 45): 'RESULT', (45, 47, 45, 67): '"""saved_train_images"""'}, {}), "(ROOT, RESULT, 'saved_train_images')", False, 'import os\n'), ((51, 8, 51, 29), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', True, 'import torchvision.transforms as transforms\n'), ((52, 8, 52, 74), 'torchvision.transforms.Normalize', 'transforms.Normalize', ({(52, 29, 52, 50): '[0.485, 0.456, 0.406]', (52, 52, 52, 73): '[0.229, 0.224, 0.225]'}, {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])', True, 'import torchvision.transforms as transforms\n'), ((66, 12, 74, 5), 'main.models.get_model', 'get_model', (), '', False, 'from main.models import get_model\n'), ((94, 32, 94, 75), 'os.path.join', 'os.path.join', ({(94, 45, 94, 49): 'ROOT', (94, 51, 94, 57): 'RESULT', (94, 59, 94, 74): 'checkpoint_name'}, {}), '(ROOT, RESULT, checkpoint_name)', False, 'import os\n'), ((196, 26, 196, 68), 'os.path.join', 'os.path.join', ({(196, 39, 196, 43): 'ROOT', (196, 45, 196, 51): 'RESULT', (196, 53, 196, 67): '"""saved_loss.p"""'}, {}), "(ROOT, RESULT, 'saved_loss.p')", False, 'import os\n'), ((198, 26, 198, 72), 'os.path.join', 'os.path.join', ({(198, 39, 198, 43): 'ROOT', (198, 45, 198, 51): 'RESULT', (198, 53, 198, 71): '"""saved_accuracy.p"""'}, {}), "(ROOT, RESULT, 'saved_accuracy.p')", False, 'import os\n'), ((272, 21, 272, 177), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((273, 21, 273, 177), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((354, 13, 354, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((359, 76, 359, 137), 'loss.prediction_stat', 'prediction_stat', ({(359, 92, 359, 105): '[sbd_outputs]', (359, 107, 359, 117): 'sbd_labels', (359, 119, 359, 136): 'data.n_classes[0]'}, {}), '([sbd_outputs], sbd_labels, data.n_classes[0])', False, 'from loss import prediction_stat\n'), ((360, 34, 360, 74), 'torch.FloatTensor', 'torch.FloatTensor', ({(360, 52, 360, 73): '[classwise_pixel_acc]'}, {}), '([classwise_pixel_acc])', False, 'import torch\n'), ((361, 33, 361, 72), 'torch.FloatTensor', 'torch.FloatTensor', ({(361, 51, 361, 71): '[classwise_gtpixels]'}, {}), '([classwise_gtpixels])', False, 'import torch\n'), ((362, 35, 362, 76), 'torch.FloatTensor', 'torch.FloatTensor', ({(362, 53, 362, 75): '[classwise_predpixels]'}, {}), '([classwise_predpixels])', False, 'import torch\n'), ((368, 76, 368, 137), 'loss.prediction_stat', 'prediction_stat', ({(368, 92, 368, 105): '[lip_outputs]', (368, 107, 368, 117): 'lip_labels', (368, 119, 368, 136): 'data.n_classes[1]'}, {}), '([lip_outputs], lip_labels, data.n_classes[1])', False, 'from loss 
import prediction_stat\n'), ((369, 34, 369, 74), 'torch.FloatTensor', 'torch.FloatTensor', ({(369, 52, 369, 73): '[classwise_pixel_acc]'}, {}), '([classwise_pixel_acc])', False, 'import torch\n'), ((370, 33, 370, 72), 'torch.FloatTensor', 'torch.FloatTensor', ({(370, 51, 370, 71): '[classwise_gtpixels]'}, {}), '([classwise_gtpixels])', False, 'import torch\n'), ((371, 35, 371, 76), 'torch.FloatTensor', 'torch.FloatTensor', ({(371, 53, 371, 75): '[classwise_predpixels]'}, {}), '([classwise_predpixels])', False, 'import torch\n'), ((99, 39, 99, 81), 'os.path.join', 'os.path.join', ({(99, 52, 99, 56): 'ROOT', (99, 58, 99, 64): 'RESULT', (99, 66, 99, 80): '"""saved_loss.p"""'}, {}), "(ROOT, RESULT, 'saved_loss.p')", False, 'import os\n'), ((100, 43, 100, 89), 'os.path.join', 'os.path.join', ({(100, 56, 100, 60): 'ROOT', (100, 62, 100, 68): 'RESULT', (100, 70, 100, 88): '"""saved_accuracy.p"""'}, {}), "(ROOT, RESULT, 'saved_accuracy.p')", False, 'import os\n'), ((197, 22, 197, 64), 'os.path.join', 'os.path.join', ({(197, 35, 197, 39): 'ROOT', (197, 41, 197, 47): 'RESULT', (197, 49, 197, 63): '"""saved_loss.p"""'}, {}), "(ROOT, RESULT, 'saved_loss.p')", False, 'import os\n'), ((199, 22, 199, 68), 'os.path.join', 'os.path.join', ({(199, 35, 199, 39): 'ROOT', (199, 41, 199, 47): 'RESULT', (199, 49, 199, 67): '"""saved_accuracy.p"""'}, {}), "(ROOT, RESULT, 'saved_accuracy.p')", False, 'import os\n'), ((208, 37, 208, 79), 'os.path.join', 'os.path.join', ({(208, 50, 208, 54): 'ROOT', (208, 56, 208, 62): 'RESULT', (208, 64, 208, 78): '"""saved_loss.p"""'}, {}), "(ROOT, RESULT, 'saved_loss.p')", False, 'import os\n'), ((244, 35, 244, 108), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((245, 25, 245, 161), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((247, 35, 247, 108), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((248, 25, 248, 161), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((254, 40, 254, 123), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((255, 30, 255, 186), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((257, 40, 257, 123), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((258, 30, 258, 186), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((269, 41, 269, 87), 'os.path.join', 'os.path.join', ({(269, 54, 269, 58): 'ROOT', (269, 60, 269, 66): 'RESULT', (269, 68, 269, 86): '"""saved_accuracy.p"""'}, {}), "(ROOT, RESULT, 'saved_accuracy.p')", False, 'import os\n'), ((148, 43, 148, 79), 'math.cos', 'math.cos', ({(148, 52, 148, 78): '(np.pi * step / total_iters)'}, {}), '(np.pi * step / total_iters)', False, 'import math\n'), ((158, 43, 158, 79), 'math.cos', 'math.cos', ({(158, 52, 158, 78): '(np.pi * step / total_iters)'}, {}), '(np.pi * step / total_iters)', False, 'import math\n'), ((227, 62, 227, 135), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((228, 42, 228, 178), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((230, 62, 230, 135), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((231, 42, 231, 178), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((234, 72, 234, 155), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((235, 52, 235, 208), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((237, 72, 237, 155), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((238, 52, 238, 208), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as 
np\n'), ((243, 34, 243, 77), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((243, 80, 243, 122), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((246, 34, 246, 77), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((246, 80, 246, 122), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((253, 39, 253, 87), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((253, 90, 253, 137), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((256, 39, 256, 87), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((256, 90, 256, 137), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((226, 60, 226, 103), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((226, 106, 226, 148), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((229, 60, 229, 103), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((229, 106, 229, 148), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((233, 70, 233, 117), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((233, 120, 233, 167), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((236, 70, 236, 117), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((236, 120, 236, 167), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n')] |
quitaiskiluisf/TI4F-2021-LogicaProgramacao | exemplos/exemplo-aula-14-01.py | d12e5c389a43c98f27726df5618fe529183329a8 | # Introduction
print('Programa para somar 8 valores utilizando vetores/listas')
print()
# Declare the vector (list)
valores = [0, 0, 0, 0, 0, 0, 0, 0]
# Ask the user for the values
for i in range(len(valores)):
valores[i] = int(input('Informe o valor: '))
# Compute the sum
soma = 0
for i in range(len(valores)):
soma += valores[i]
# Display the result
print(f'A soma dos valores é {soma}')
| [] |
pwicks86/adventofcode2015 | day3/p1.py | fba7cc8f6942f43f5b0226a0ac70365630f14cbd | from collections import defaultdict
f = open("input.txt")
d = f.read()
houses = defaultdict(int,{(0,0):1})
cur = [0,0]
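# Follow each movement instruction one house at a time, counting visits per coordinate.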
for c in d:
if c == "<":
cur[0] -= 1
if c == ">":
cur[0] += 1
if c == "v":
cur[1] += 1
if c == "^":
cur[1] -= 1
houses[tuple(cur)]+=1
print(len(houses.keys()))
| [((4, 9, 4, 35), 'collections.defaultdict', 'defaultdict', ({(4, 21, 4, 24): 'int', (4, 25, 4, 34): '{(0, 0): 1}'}, {}), '(int, {(0, 0): 1})', False, 'from collections import defaultdict\n')] |
NUbots/NUpbr | pbr/config/blend_config.py | 49b0d2abd15512a93bfe21157269288c9ec4c54d | # Blender-specific Configuration Settings
from math import pi
render = {
"render_engine": "CYCLES",
"render": {"cycles_device": "GPU"},
"dimensions": {"resolution": [1280, 1024], "percentage": 100.0},
"sampling": {"cycles_samples": 256, "cycles_preview_samples": 16},
"light_paths": {
"transparency": {"max_bounces": 1, "min_bounces": 1},
"bounces": {"max_bounces": 1, "min_bounces": 1},
"diffuse": 1,
"glossy": 1,
"transmission": 1,
"volume": 0,
"reflective_caustics": False,
"refractive_caustics": False,
},
"performance": {
"render_tile": [512, 512],
"threads": {"mode": "FIXED", "num_threads": 8},
},
"layers": {"use_hair": False},
}
scene = {"units": {"length_units": "METRIC", "rotation_units": "DEGREES"}}
layers = {"denoising": {"use_denoising": False}}
field = {
"material": {
"mapping": {
"translation": (0.0, 0.05, 0.0),
"rotation": (0.0, -pi / 2.0, 0.0),
"scale": (1.0, 0.6, 1.0),
},
"mix_lower_grass": {
"inp1": (0.000, 0.012, 0.00076, 1.0),
"inp2": (0.020, 0.011, 0.0, 1.0),
},
"mix_upper_grass": {
"inp1": (0.247, 0.549, 0.0, 1),
"inp2": (0.257, 0.272, 0.0, 1),
},
"noise": {"inp": [5.0, 2.0, 0.0]},
"hsv": {"inp": [0.0, 0.0, 1.9, 1.0]},
"mix_up_grass_hsv": {"inp0": 0.455},
"mix_low_grass_field_lines": {"inp0": 0.4},
"mix_grass": {"inp0": 0.391},
"principled": {"specular": 0.225, "roughness": 0.625},
},
"lower_plane": {
"colour": (0.003, 0.04, 0.0, 1.0),
"principled": {"specular": 0.225, "roughness": 1.0},
"mapping": {"scale": (0.1, 0.1, 1.0)},
},
}
ball = {
"initial_cond": {"segments": 16, "ring_count": 10, "calc_uvs": True},
"material": {"metallic": 0.0, "roughness": 0.35},
"subsurf_mod": {"levels": 1, "rend_levels": 4},
}
goal = {
"initial_cond": {"vertices": 32, "calc_uvs": True},
"corner_curve": {"fill": "FULL"},
"material": {"metallic": 0.0, "roughness": 0.35, "colour": (0.8, 0.8, 0.8, 1.0)},
"subsurf_mod": {"levels": 1, "rend_levels": 4},
}
robot = {"material": {"specular": 0.742, "metallic": 0.0, "roughness": 0.9}}
| [] |
mia-jingyi/simglucose | simglucose/controller/basal_bolus_ctrller.py | a90bd8750fce362be91668ed839b3b252bc0d58d | from .base import Controller
from .base import Action
import numpy as np
import pandas as pd
import logging
from collections import namedtuple
from tqdm import tqdm
logger = logging.getLogger(__name__)
CONTROL_QUEST = 'simglucose/params/Quest.csv'
PATIENT_PARA_FILE = 'simglucose/params/vpatient_params.csv'
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])
class BBController(Controller):
"""
This is a Basal-Bolus Controller that is typically practiced by a Type-1
Diabetes patient. The performance of this controller can serve as a
baseline when developing a more advanced controller.
"""
def __init__(self, target=140):
self.quest = pd.read_csv(CONTROL_QUEST)
self.patient_params = pd.read_csv(PATIENT_PARA_FILE)
self.target = target
def policy(self, observation, reward, done, **kwargs):
sample_time = kwargs.get('sample_time', 1)
pname = kwargs.get('patient_name')
meal = kwargs.get('meal') # unit: g/min
action = self._bb_policy(pname, meal, observation.CGM, sample_time)
return action
def _bb_policy(self, name, meal, glucose, env_sample_time):
"""
Helper function to compute the basal and bolus amount.
The basal insulin is based on the insulin amount to keep the blood
glucose in the steady state when there is no (meal) disturbance.
basal = u2ss (pmol/(L*kg)) * body_weight (kg) / 6000 (U/min)
The bolus amount is computed based on the current glucose level, the
target glucose level, the patient's correction factor and the patient's
carbohydrate ratio.
bolus = ((carbohydrate / carbohydrate_ratio) +
(current_glucose - target_glucose) / correction_factor)
/ sample_time
NOTE the bolus computed from the above formula is in unit U. The
simulator only accepts insulin rate. Hence the bolus is converted to
insulin rate.
"""
if any(self.quest.Name.str.match(name)):
quest = self.quest[self.quest.Name.str.match(name)]
params = self.patient_params[self.patient_params.Name.str.match(
name)]
u2ss = params.u2ss.values.item() # unit: pmol/(L*kg)
BW = params.BW.values.item() # unit: kg
else:
quest = pd.DataFrame([['Average', 13.5, 23.52, 50, 30]],
columns=['Name', 'CR', 'CF', 'TDI', 'Age'])
u2ss = 1.43 # unit: pmol/(L*kg)
BW = 57.0 # unit: kg
basal = u2ss * BW / 6000 # unit: U/min
if meal > 0:
logger.info('Calculating bolus ...')
logger.info(f'Meal = {meal} g/min')
logger.info(f'glucose = {glucose}')
bolus = (
(meal * env_sample_time) / quest.CR.values + (glucose > 150) *
(glucose - self.target) / quest.CF.values).item() # unit: U
else:
bolus = 0 # unit: U
# This is to convert bolus in total amount (U) to insulin rate (U/min).
# The simulation environment does not treat basal and bolus
# differently. The unit of Action.basal and Action.bolus are the same
# (U/min).
bolus = bolus / env_sample_time # unit: U/min
return Action(basal=basal, bolus=bolus)
def reset(self):
pass
class ManualBBController(Controller):
def __init__(self, target, cr, cf, basal, sample_rate=5, use_cf=True, use_bol=True, cooldown=0,
corrected=True, use_low_lim=False, low_lim=70):
super().__init__(self)
self.target = target
self.orig_cr = self.cr = cr
self.orig_cf = self.cf = cf
self.orig_basal = self.basal = basal
self.sample_rate = sample_rate
self.use_cf = use_cf
self.use_bol = use_bol
self.cooldown = cooldown
self.last_cf = np.inf
self.corrected = corrected
self.use_low_lim = use_low_lim
self.low_lim = low_lim
def increment(self, cr_incr=0, cf_incr=0, basal_incr=0):
self.cr += cr_incr
self.cf += cf_incr
self.basal += basal_incr
def policy(self, observation, reward, done, **kwargs):
carbs = kwargs.get('carbs')
glucose = kwargs.get('glucose')
action = self.manual_bb_policy(carbs, glucose)
return action
def manual_bb_policy(self, carbs, glucose, log=False):
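        # Bolus = meal carbs / CR, plus a correction-factor dose above target (rate-limited by cooldown),
        # minus an optional low-glucose correction below low_lim; the basal rate is returned unchanged.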
if carbs > 0:
if self.corrected:
carb_correct = carbs / self.cr
else:
# assuming carbs are already multiplied by sampling rate
carb_correct = (carbs/self.sample_rate) / self.cr
hyper_correct = (glucose > self.target) * (glucose - self.target) / self.cf
hypo_correct = (glucose < self.low_lim) * (self.low_lim - glucose) / self.cf
bolus = 0
if self.use_low_lim:
bolus -= hypo_correct
if self.use_cf:
if self.last_cf > self.cooldown and hyper_correct > 0:
bolus += hyper_correct
self.last_cf = 0
if self.use_bol:
bolus += carb_correct
bolus = bolus / self.sample_rate
else:
bolus = 0
carb_correct = 0
hyper_correct = 0
hypo_correct = 0
self.last_cf += self.sample_rate
if log:
return Action(basal=self.basal, bolus=bolus), hyper_correct, hypo_correct, carb_correct
else:
return Action(basal=self.basal, bolus=bolus)
def get_params(self):
return ParamTup(basal=self.basal, cf=self.cf, cr=self.cr)
def adjust(self, basal_adj, cr_adj):
self.basal += self.orig_basal + basal_adj
self.cr = self.orig_cr * cr_adj
def reset(self):
self.cr = self.orig_cr
self.cf = self.orig_cf
self.basal = self.orig_basal
self.last_cf = np.inf
def bb_test(bbc, env, n_days, seed, full_save=False):
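    # Roll out the controller for n_days of 5-minute steps (288/day), simulating occasional missed meals
    # and noisy carb estimates before each bolus decision.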
env.seeds['sensor'] = seed
env.seeds['scenario'] = seed
env.seeds['patient'] = seed
env.reset()
full_patient_state = []
carb_error_mean = 0
carb_error_std = 0.2
carb_miss_prob = 0.05
action = bbc.manual_bb_policy(carbs=0, glucose=140)
for _ in tqdm(range(n_days*288)):
obs, reward, done, info = env.step(action=action.basal+action.bolus)
bg = env.env.CGM_hist[-1]
carbs = info['meal']
if np.random.uniform() < carb_miss_prob:
carbs = 0
err = np.random.normal(carb_error_mean, carb_error_std)
carbs = carbs + carbs * err
action = bbc.manual_bb_policy(carbs=carbs, glucose=bg)
full_patient_state.append(info['patient_state'])
full_patient_state = np.stack(full_patient_state)
if full_save:
return env.env.show_history(), full_patient_state
else:
return {'hist': env.env.show_history()[288:]} | [((9, 9, 9, 36), 'logging.getLogger', 'logging.getLogger', ({(9, 27, 9, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((12, 11, 12, 56), 'collections.namedtuple', 'namedtuple', ({(12, 22, 12, 32): '"""ParamTup"""', (12, 34, 12, 55): "['basal', 'cf', 'cr']"}, {}), "('ParamTup', ['basal', 'cf', 'cr'])", False, 'from collections import namedtuple\n'), ((178, 25, 178, 53), 'numpy.stack', 'np.stack', ({(178, 34, 178, 52): 'full_patient_state'}, {}), '(full_patient_state)', True, 'import numpy as np\n'), ((22, 21, 22, 47), 'pandas.read_csv', 'pd.read_csv', ({(22, 33, 22, 46): 'CONTROL_QUEST'}, {}), '(CONTROL_QUEST)', True, 'import pandas as pd\n'), ((23, 30, 23, 60), 'pandas.read_csv', 'pd.read_csv', ({(23, 42, 23, 59): 'PATIENT_PARA_FILE'}, {}), '(PATIENT_PARA_FILE)', True, 'import pandas as pd\n'), ((174, 14, 174, 63), 'numpy.random.normal', 'np.random.normal', ({(174, 31, 174, 46): 'carb_error_mean', (174, 48, 174, 62): 'carb_error_std'}, {}), '(carb_error_mean, carb_error_std)', True, 'import numpy as np\n'), ((59, 20, 60, 76), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((172, 11, 172, 30), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n')] |
redhat-openstack/ceilometer | ceilometer/event/trait_plugins.py | 9e503d7068889e52e9144079de331ed51676e535 | #
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class TraitPluginBase(object):
"""Base class for plugins.
It converts notification fields to Trait values.
"""
def __init__(self, **kw):
"""Setup the trait plugin.
For each Trait definition a plugin is used on in a conversion
definition, a new instance of the plugin will be created, and
initialized with the parameters (if any) specified in the
config file.
:param kw: the parameters specified in the event definitions file.
"""
super(TraitPluginBase, self).__init__()
@abc.abstractmethod
def trait_value(self, match_list):
"""Convert a set of fields to a Trait value.
This method is called each time a trait is attempted to be extracted
from a notification. It will be called *even if* no matching fields
are found in the notification (in that case, the match_list will be
empty). If this method returns None, the trait *will not* be added to
the event. Any other value returned by this method will be used as
the value for the trait. Values returned will be coerced to the
appropriate type for the trait.
:param match_list: A list (may be empty if no matches) of *tuples*.
Each tuple is (field_path, value) where field_path is the jsonpath
for that specific field.
Example::
trait's fields definition: ['payload.foobar',
'payload.baz',
'payload.thing.*']
notification body:
{
'message_id': '12345',
'publisher': 'someservice.host',
'payload': {
'foobar': 'test',
'thing': {
'bar': 12,
'boing': 13,
}
}
}
match_list will be: [('payload.foobar','test'),
('payload.thing.bar',12),
('payload.thing.boing',13)]
Here is a plugin that emulates the default (no plugin) behavior:
.. code-block:: python
class DefaultPlugin(TraitPluginBase):
"Plugin that returns the first field value."
def __init__(self, **kw):
super(DefaultPlugin, self).__init__()
def trait_value(self, match_list):
if not match_list:
return None
return match_list[0][1]
"""
class SplitterTraitPlugin(TraitPluginBase):
"""Plugin that splits a piece off of a string value."""
def __init__(self, separator=".", segment=0, max_split=None, **kw):
"""Setup how do split the field.
:param separator: String to split on. default "."
:param segment: Which segment to return. (int) default 0
:param max_split: Limit number of splits. Default: None (no limit)
"""
self.separator = separator
self.segment = segment
self.max_split = max_split
super(SplitterTraitPlugin, self).__init__(**kw)
def trait_value(self, match_list):
if not match_list:
return None
value = six.text_type(match_list[0][1])
if self.max_split is not None:
values = value.split(self.separator, self.max_split)
else:
values = value.split(self.separator)
try:
return values[self.segment]
except IndexError:
return None
class BitfieldTraitPlugin(TraitPluginBase):
"""Plugin to set flags on a bitfield."""
def __init__(self, initial_bitfield=0, flags=None, **kw):
"""Setup bitfield trait.
:param initial_bitfield: (int) initial value for the bitfield
Flags that are set will be OR'ed with this.
:param flags: List of dictionaries defining bitflags to set depending
on data in the notification. Each one has the following
keys:
path: jsonpath of field to match.
bit: (int) number of bit to set (lsb is bit 0)
value: set bit if corresponding field's value
matches this. If value is not provided,
bit will be set if the field exists (and
                               is non-null), regardless of its value.
"""
self.initial_bitfield = initial_bitfield
if flags is None:
flags = []
self.flags = flags
super(BitfieldTraitPlugin, self).__init__(**kw)
def trait_value(self, match_list):
matches = dict(match_list)
bitfield = self.initial_bitfield
for flagdef in self.flags:
path = flagdef['path']
bit = 2 ** int(flagdef['bit'])
if path in matches:
if 'value' in flagdef:
if matches[path] == flagdef['value']:
bitfield |= bit
else:
bitfield |= bit
return bitfield
| [((21, 1, 21, 31), 'six.add_metaclass', 'six.add_metaclass', ({(21, 19, 21, 30): 'abc.ABCMeta'}, {}), '(abc.ABCMeta)', False, 'import six\n'), ((113, 16, 113, 47), 'six.text_type', 'six.text_type', ({(113, 30, 113, 46): 'match_list[0][1]'}, {}), '(match_list[0][1])', False, 'import six\n')] |
gongjunhuang/web | web13/jsonapi.py | 9412f6fd7c223174fdb30f4d7a8b61a8e130e329 | from flask import Flask, redirect, url_for, jsonify, request
app = Flask(__name__)
users = []
'''
JSON API
The request body carries JSON and the response returns JSON.
Benefits:
1. The message format is unified, so the choice of client language matters less
2. Easy to expose as an open API
3. Rendering is pushed to the client (heavy client-side rendering)
RESTful api
Dr. Fielding
URLs are organized around resources (nouns)
GET    /players          fetch all players
GET    /player/id        fetch the data of the player with this id
PUT    /players          full update
PATCH  /players          partial update
DELETE /player/id        delete one player
GET    /player/id/level
'''
@app.route("/", methods=["GET"])
def index():
    return '''<form method=post action='/add'>
<input type=text name=author>
<button>提交</button>
</form>
'''
@app.route("/add", methods=["POST"])
def add():
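    # Store the submitted author in the in-memory list, then redirect back to the form.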
form = request.form
users.append(dict(author=form.get("author", "")))
return redirect(url_for(".index"))
@app.route("/json")
def json():
return jsonify(users)
app.run() | [((3, 6, 3, 21), 'flask.Flask', 'Flask', ({(3, 12, 3, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, redirect, url_for, jsonify, request\n'), ((46, 11, 46, 25), 'flask.jsonify', 'jsonify', ({(46, 19, 46, 24): 'users'}, {}), '(users)', False, 'from flask import Flask, redirect, url_for, jsonify, request\n'), ((41, 20, 41, 37), 'flask.url_for', 'url_for', ({(41, 28, 41, 36): '""".index"""'}, {}), "('.index')", False, 'from flask import Flask, redirect, url_for, jsonify, request\n')] |
mhndlsz/memodrop | cards/migrations/0012_auto_20180331_1348.py | 7ba39143c8e4fbe67881b141accedef535e936e6 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-31 13:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cards', '0011_auto_20180319_1112'),
]
operations = [
migrations.AlterField(
model_name='card',
name='answer',
field=models.TextField(verbose_name='Answer'),
),
migrations.AlterField(
model_name='card',
name='hint',
field=models.TextField(blank=True, verbose_name='Hint'),
),
migrations.AlterField(
model_name='card',
name='question',
field=models.TextField(verbose_name='Question'),
),
]
| [((18, 18, 18, 57), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((23, 18, 23, 67), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((28, 18, 28, 59), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n')] |
T6751/MoMMI | MoMMI/Modules/ss14_nudges.py | 4b9dd0d49c6e2bd82b82a4893fc35475d4e39e9a | import logging
from typing import Match, Any, Dict
import aiohttp
from discord import Message
from MoMMI import comm_event, command, MChannel, always_command
logger = logging.getLogger(__name__)
@comm_event("ss14")
async def ss14_nudge(channel: MChannel, message: Any, meta: str) -> None:
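    # Game -> Discord: validate the server's password and post incoming OOC chat to this channel.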
try:
config: Dict[str, Any] = channel.module_config(f"ss14.servers.{meta}")
except ValueError:
return
expect_password = config["password"]
if expect_password != message.get("password"):
return
if "type" not in message or "contents" not in message:
return
contents = message["contents"]
type = message["type"]
if type == "ooc":
final_message = f"\u200B**OOC**: `{contents['sender']}`: {contents['contents']}"
else:
return
await channel.send(final_message)
@always_command("ss14_relay", unsafe=True)
async def ss14_relay(channel: MChannel, match: Match, message: Message) -> None:
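    # Discord -> game: relay channel messages to the matching SS14 server's OOC endpoint.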
if not channel.internal_name:
return
content = message.content
content = content.strip()
if not content or content[0] == "\u200B":
return
server = None
config: Any
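    # Find the SS14 server whose configured relay channel matches this Discord channel.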
for config in channel.server_config("modules.ss14", []):
if config["discord_channel"] != channel.internal_name:
continue
server = config["server"]
if not server:
return
config = channel.module_config(f"ss14.servers.{server}")
password = config["password"]
url = config["api_url"] + "/ooc"
async with aiohttp.ClientSession() as session:
async with session.post(url, json={"password": password, "sender": message.author.name, "contents": content}) as resp:
r = await resp.text()
logger.error(f"{resp.status}")
| [((7, 9, 7, 36), 'logging.getLogger', 'logging.getLogger', ({(7, 27, 7, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((9, 1, 9, 19), 'MoMMI.comm_event', 'comm_event', ({(9, 12, 9, 18): '"""ss14"""'}, {}), "('ss14')", False, 'from MoMMI import comm_event, command, MChannel, always_command\n'), ((33, 1, 33, 42), 'MoMMI.always_command', 'always_command', (), '', False, 'from MoMMI import comm_event, command, MChannel, always_command\n'), ((60, 15, 60, 38), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ({}, {}), '()', False, 'import aiohttp\n')] |
CybersecurityLuxembourg/openxeco | oxe-api/test/resource/company/test_get_company_taxonomy.py | 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | from test.BaseCase import BaseCase
class TestGetCompanyTaxonomy(BaseCase):
@BaseCase.login
def test_ok(self, token):
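        # Seed two companies with taxonomy assignments and check that only company 2's links are returned.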
self.db.insert({"id": 1, "name": "My Company"}, self.db.tables["Company"])
self.db.insert({"id": 2, "name": "My Company 2"}, self.db.tables["Company"])
self.db.insert({"name": "CAT1"}, self.db.tables["TaxonomyCategory"])
self.db.insert({"name": "CAT2"}, self.db.tables["TaxonomyCategory"])
self.db.insert({"id": 1, "name": "VAL1", "category": "CAT1"}, self.db.tables["TaxonomyValue"])
self.db.insert({"id": 2, "name": "VAL2", "category": "CAT2"}, self.db.tables["TaxonomyValue"])
self.db.insert({"company": 1, "taxonomy_value": 1}, self.db.tables["TaxonomyAssignment"])
self.db.insert({"company": 1, "taxonomy_value": 2}, self.db.tables["TaxonomyAssignment"])
self.db.insert({"company": 2, "taxonomy_value": 2}, self.db.tables["TaxonomyAssignment"])
response = self.application.get('/company/get_company_taxonomy/2',
headers=self.get_standard_header(token))
self.assertEqual([{'company': 2, 'taxonomy_value': 2}], response.json)
self.assertEqual(200, response.status_code)
@BaseCase.login
def test_ok_empty(self, token):
self.db.insert({"id": 2, "name": "My Company"}, self.db.tables["Company"])
response = self.application.get('/company/get_company_taxonomy/2',
headers=self.get_standard_header(token))
self.assertEqual(response.json, [])
self.assertEqual(200, response.status_code)
| [] |
mikeboers/Spoon | spoon/models/groupmembership.py | 9fe4a06be7c2c6c307b79e72893e32f2006de4ea | import sqlalchemy as sa
from ..core import db
class GroupMembership(db.Model):
__tablename__ = 'group_memberships'
__table_args__ = dict(
autoload=True,
extend_existing=True,
)
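    # Columns are reflected from the existing database table (autoload=True); only the relationships are declared here.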
user = db.relationship('Account',
foreign_keys='GroupMembership.user_id',
backref=db.backref('groups', cascade="all, delete-orphan"),
)
group = db.relationship('Account',
foreign_keys='GroupMembership.group_id',
backref=db.backref('members', cascade="all, delete-orphan"),
)
| [] |
HaolinCMU/Soft_tissue_tracking | nonlinear/aorta/nonlinearCasesCreation_aorta.py | 8592b87066ddec84a3aefc18240303cb085cf34c | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 13:08:16 2020
@author: haolinl
"""
import copy
import os
import time
import numpy as np
import random
import scipy.io # For extracting data from .mat file
class inputFileGenerator(object):
"""
Generate input file for Abaqus.
Unit system:
Length: m
Force: N
Pressure: Pa
"""
def __init__(self, data_file_name, write_path, material_type, fix_indices_list, node_variable_name, elem_variable_name, user_prescribed_force_field=[]):
"""
Initialize parameters.
Parameters:
----------
data_file_name: String.
The file path of information of node, element, etc.
write_path: String.
The path to write the inp file.
material_type: String.
The type of material.
Used to indicate whether to consider material nonlinearity.
fix_indices_list: List of ints.
The node indices to be fixed.
node_variable_name: String.
The variable name of the nodes matrix in the data file.
elem_variable_name: String.
The variable name of the elements matrix in the data file.
user_prescribed_force_field (optional): List of floats.
The user-prescribed vector of laplacian force field.
Size: nSurfI x 3.
Default: [].
"""
# Data & Variables.
self.data_file_name = data_file_name
self.data_mat = scipy.io.loadmat(self.data_file_name)
self._surface_mat = self.data_mat["FaceI"]
self._surface_nodes = self.data_mat["idxSurfI"]
self._surface_nodes_num = self.data_mat["nSurfI"][0,0]
self._outer_surface_regionNum = 22 # Int. The region number of outer surface.
self._outer_surface_nodes_list = self._extractOuterSurfaceNodes(self.data_mat["faces"], self._outer_surface_regionNum) # List of sorted ints. The indices of outer surface nodes. Indexed from 1.
self._outer_surface_nodes_num = len(self._outer_surface_nodes_list)
self._triangle_nodes_list = []
self._coupled_list = []
self._node_variable_name = node_variable_name
self._elem_variable_name = elem_variable_name
self._inputFile_lines_total = []
self.writePath = write_path
self._modulus = 1e7 # Young's modulus. Unit: Pa. Default: 1e7.
self._poisson_ratio = 0.48 # Poisson's ratio. Linear elastic default: 0.3; neo-Hookean default: 0.48.
self._isCoupleOn = False # Boolean. True: use coupling constraint; False: do not use coupling constraint. Must not turn on if applying Laplacian smoothing.
self._coupling_type = "Kinematic" # String. "Kinematic" / "Distributing".
self._coupling_neighbor_layers = 1 # How deep does the neighborhood searching go. Default: 1.
self._isLaplacianSmoothingOn = True # Boolean. True: use laplacian smoothing. False: do not use laplacian smoothing.
self._laplacian_variable_name = "laplacianMatrixI3"
self._massMatrix_variable_name = "massMatrixI3"
self._laplacian_iter_num = 20 # Default: 3.
self._smoothing_rate = 0.1 # Default: 0.1 (Previous: 1e-4).
self.loads_num = 3 # For initial testing.
self._load_sampling_style = "gaussian" # String. Indicating the type of random sampling for force components. "uniform" / "gaussian".
self._load_scale = (0.0, 10.0) # Absolute range of the force for uniform sampling. Case and BC specific. (min, max). Unit: N.
self._gaussian_params = (4.0, 0.8) # Mean and deviation of the force for Gaussian sampling. Case and BC specific. (mean, deviation). Unit: N.
self._load_params_tuple = None
self._initial_force_component_vector = [] # List of floats. Default: []. Example: [5., 5., 5.].
self.autoIncrementNum = 5000 # Int. The maximum increment number of the AutoSolver.
self.initIncrem = 0.001 # Float. The initial length of the increment (for fixed-step, this is also the length per increm).
        self.minIncrem = 1e-20 # Float. The minimum increment length for the AutoSolver (useless for the StaticSolver).
        self.maxIncrem = 1.0 # Float. The maximum increment length for the AutoSolver (useless for the StaticSolver).
self.totalTime = 1.0 # Float. The total time for one simulation step.
self.frameNum = 1 # Int. The number of frames intending to extract from the nodal file.
# ================== Load sampling variables ================== #
if self._isCoupleOn: self._couple_region_num = self.loads_num
else: self._couple_region_num = 0
if self._load_sampling_style == "gaussian": self._load_params_tuple = self._gaussian_params
elif self._load_sampling_style == "uniform": self._load_params_tuple = self._load_scale
else:
self._load_sampling_style = "uniform"
self._load_params_tuple = self._load_scale
# ============================================================= #
# Header.
self._header = ["*Heading"]
# Part definition.
self._part_name = "part-1"
self._material_name = "tissue"
self._part_initial = ["*Part, name={}".format(self._part_name)] # Total list of Part definition.
self._node = ["*Node"]
self._elem = ["*Element, type=C3D10"] # Nonlinear tetrahedron. http://web.mit.edu/calculix_v2.7/CalculiX/ccx_2.7/doc/ccx/node33.html#tennode.
self._nset_all = []
self._elset_all = []
self._section = ["*Solid Section, elset=allElems, material={}".format(self._material_name),
","]
self._part_end = ["*End Part"]
self._new_node_list = []
self._new_node_dict = {}
self._node_num = None
self._orig_node_num = None
self._elem_num = None
self._part = self.generatePart()
# Load settings.
self._loads_nset_name_list = []
self._rf_name_list = []
self._rf_nset_name_list = []
self._rf_nsets = []
self._load_nsets = [] # Nset definition of loads.
self._load = self.generateLoadSetting()
# Assembly definition.
self._assembly_name = "assembly-1"
self._instance_name = "instance-1"
self._assembly_initial = ["*Assembly, name={}".format(self._assembly_name)] # Total list of Assembly definition.
self._instance = ["*Instance, name={}, part={}".format(self._instance_name, self._part_name),
"*End Instance"]
self._ref_nodes_list = []
self._fix_nset_name = "fix"
self._fix_indices_list = fix_indices_list
self._fix_nset = self.generateNset(self._fix_indices_list, self._fix_nset_name, self._instance_name) # Nset definition of fix BC.
self._loads_posi_indices_list = self._generateLoadPositions(self.loads_num, self._fix_indices_list) # Generate load positions. Randomly. For fixed mode: style="fix", input_posi_indices_list=[415, 470, 107].
self._laplacian_initial_loads_posi = None # List. Containing the original position of concentrated forces.
self._laplacian_force_field = None # 2D Array of floats. Size: nSurfI * 3. The force field on the outer surface.
self._user_prescribed_force_field = user_prescribed_force_field # List of floats. Size: nSurfI * 3. The prescribed force field on the outer surface. Default: [].
self._surface_list = []
self._coupling_list = []
self._nset_boundary = [] # All nsets definitions in assembly. Boundary conditions
self._assembly_end = ["*End Assembly"]
self._assembly = self.generateAssembly()
# Material.
self.material_type = material_type # String. Indicate material type. "linear"/"neo_hookean_fitting"/"neo_hookean_solid".
self._material_def_file_name = "" # Default: "". If there is a file of stress strain definition, please specify here (must not be "").
self._material = self.generateMaterial(self.material_type)
# Boundary condition.
self._boundary_initial = ["*Boundary"]
self._boundary = self.generateBoundaryCondition_fixAll()
# Step settings.
self.freq = int(self.autoIncrementNum / self.frameNum) # Int. The data frame extraction frequency (also refers to the number of increments. Extract one frame per "self.freq" increments). Especially for StaticSolver case.
self._step = ["*Step, name=step-1, nlgeom=YES, inc={}".format(self.autoIncrementNum),
"*Static",
"{}, {}, {}, {}".format(self.initIncrem, self.totalTime,
self.minIncrem, self.maxIncrem)] # Auto solver.
self._step_end = ["*End Step"]
# Rest settings.
self._restart = ["*Restart, write, frequency=0"]
self._output = ["*Output, field, variable=PRESELECT",
"*Output, history, variable=PRESELECT"]
self._fil = ["*FILE FORMAT, ASCII",
"*node file, frequency={}".format(self.freq),
"U, COORD",
"*El file, frequency={}".format(self.freq),
"S, COORD"]
self._resSettings = self._restart + self._output + self._fil
def readFile(self, read_path):
"""
Read files from specific path.
Parameters:
----------
read_path: String.
Path of the original inp file.
Return:
----------
lines: List of strings.
The list of lines from the file.
"""
with open(read_path, "rt") as f: lines = f.read().splitlines()
return lines
def writeFile(self, write_status):
"""
Write 'self.write_lines' into a new inp file.
Parameters:
----------
write_status: String.
"Normal" / "Fast".
"Normal": generate all definitions;
"Fast": generate nodes and elements definition only.
"""
if write_status == "Normal":
self._inputFile_lines_total = (self._header + self._part + self._assembly +
self._material + self._boundary + self._step +
self._load + self._resSettings + self._step_end)
content = '\n'.join(self._inputFile_lines_total)
with open(self.writePath, 'w') as f: f.write(content)
elif write_status == "Fast":
self._inputFile_lines_total = self._header + self._part
content = '\n'.join(self._inputFile_lines_total)
with open(self.writePath, 'w') as f: f.write(content)
else:
self.writeFile("Normal")
def generatePart(self):
"""
Generate part definition.
Returns:
----------
The list collection of all sub-definition lists, including:
part_initial: header part of "Part definition".
node: Node definition.
elem: Element definition.
elset_all: The elset containing all elements. For material definition specifically.
section: Section definition.
part_end: The endline of "Part definition".
"""
self.generateNodes(self.data_mat[self._node_variable_name], self._node)
self.generateElements(self.data_mat[self._elem_variable_name], self._elem)
self.nonlinearization()
# Generate all element elset.
allElem_list, allElem_list_name = [], "allElems"
for i in range(len(self._elem[1:])): allElem_list.append(str(i+1))
self._elset_all = self.generateElset(allElem_list, allElem_list_name)
# Generate Section.
self._section = self.generateSection(allElem_list_name, self._material_name)
# Collection.
return (self._part_initial + self._node + self._elem + self._elset_all +
self._section + self._part_end)
def generateNodes(self, node_mat, target_node_list, specified_indices_list=[]):
"""
Generate nodes information.
Parameters:
----------
node_mat: 2D Array of ints.
The matrix containing the coordinates of the nodes to-be-defined under "*Node".
        target_node_list: List of strings.
The definition of node list.
specified_indices_list (optional): List of ints.
List the indices of the input node list, following the exact order of the node_mat.
Default: [].
"""
for i in range(node_mat.shape[0]):
if specified_indices_list == []: node_list_temp = ["{}".format(i+1)]
else: node_list_temp = ["{}".format(specified_indices_list[i])]
node_list_temp += [str(coord) for coord in list(node_mat[i,:])]
target_node_list.append(', '.join(node_list_temp))
def _extractOuterSurfaceNodes(self, faces_def_matrix, outer_surface_regionNum):
"""
Extract the nodes on the outer surface of the geometry (for force application in next step).
Parameters:
----------
faces_def_matrix: 2D Array of ints.
The definition of all faces, including the information of surface region number.
outer_surface_regionNum: Int.
The region number of outer surface of the geometry.
Returns:
----------
outer_surface_nodes_list: List of ints.
The indices of nodes on the outer surface. Indexed from 1. Sorted.
"""
outer_surface_nodes_list = []
for i in range(faces_def_matrix.shape[0]):
if faces_def_matrix[i,0] == outer_surface_regionNum: # The region number of outer surface.
outer_surface_nodes_list += [int(ind) for ind in faces_def_matrix[i,1:]] # Indexed from 1.
outer_surface_nodes_list = list(set(outer_surface_nodes_list))
outer_surface_nodes_list.sort()
return outer_surface_nodes_list
def generateElements(self, elem_mat, target_elem_list, specified_indices_list=[]):
"""
Generate elements information.
Parameters:
----------
elem_mat: 2D Array of ints.
The matrix containing the indices of each element to-be-defined under "*Element".
        target_elem_list: List of strings.
The definition of element list.
specified_indices_list (optional): List of ints.
List the indices of the input element list, following the exact order of the elem_mat.
Default: [].
"""
for i in range(elem_mat.shape[0]):
if specified_indices_list == []: elem_list_temp = ["{}".format(i+1)]
else: elem_list_temp = ["{}".format(specified_indices_list[i])]
elem_line_temp = [str(ind) for ind in list(elem_mat[i,:])]
# Make sure the order of nodes for tetrahedron definition is counter-clockwise, otherwise resulting in negative volume.
ind_temp = elem_line_temp[1]
elem_line_temp[1] = elem_line_temp[2]
elem_line_temp[2] = ind_temp
elem_list_temp += elem_line_temp
target_elem_list.append(', '.join(elem_list_temp))
def generateNset(self, node_list, nset_name, instance_name=None):
"""
Generate node set information.
Parameters:
----------
node_list: List of ints.
The list of nodes to be contained in the node list.
nset_name: String.
The name of the to-be-defined node list.
instance_name (optional): String.
The name of specified instance.
Only use in assembly definition.
Default: None. (Part cases)
Returns:
----------
nset: List of strings.
The definition of a specific nset.
"""
if instance_name == None: nset = ["*Nset, nset={}".format(nset_name)]
else: nset = ["*Nset, nset={}, instance={}".format(nset_name, instance_name)]
nset_line_temp, nset_string_temp = [], None
for i, ind in enumerate(node_list):
nset_line_temp.append(str(ind))
if (i+1) % 10 == 0:
nset_string_temp = ', '.join(nset_line_temp)
nset.append(copy.deepcopy(nset_string_temp))
nset_line_temp, nset_string_temp = [], None
nset_string_temp = ', '.join(nset_line_temp)
nset.append(copy.deepcopy(nset_string_temp))
return nset
def generateElset(self, elem_list, elset_name, instance_name=None):
"""
Generate element set information.
Parameters:
----------
elem_list: List of ints.
The list of elements to be contained in the element list.
elset_name: String.
The name of the to-be-defined element list.
instance_name (optional): String.
The name of specified instance.
Only use in assembly definition.
Default: None. (Part cases)
Returns:
----------
elset: List of strings.
The definition of a specific elset.
"""
if instance_name == None: elset = ["*Elset, elset={}".format(elset_name)]
else: elset = ["*Elset, elset={}, instance={}".format(elset_name, instance_name)]
elset_line_temp, elset_string_temp = [], None
for i, ind in enumerate(elem_list):
elset_line_temp.append(str(ind))
if (i+1) % 10 == 0:
elset_string_temp = ', '.join(elset_line_temp)
elset.append(copy.deepcopy(elset_string_temp))
elset_line_temp, elset_string_temp = [], None
elset_string_temp = ', '.join(elset_line_temp)
elset.append(copy.deepcopy(elset_string_temp))
return elset
def generateSection(self, elset_name, material_name):
"""
Generate section information.
Parameters:
----------
elset_name: String.
The name of the elset to be assigned a section.
material_name: String.
The name of defined material.
Returns:
----------
section: List of strings.
The definition of section.
"""
section = ["*Solid Section, elset={}, material={}".format(elset_name, material_name),
","]
return section
def generateMaterial(self, material_type):
"""
Generate lines for material definition.
Parameters:
----------
material_type: String.
Indicate what type of material is used.
Returns:
----------
material_lines: List of lines.
The lines of material definition.
"""
material_lines = ["*Material, name={}".format(self._material_name)]
if material_type == "neo_hookean_fitting":
stress_strain_lines = self._generateNeoHookeanFitting(self._modulus, (-0.3, 0.3), file_name=self._material_def_file_name)
material_lines += ["*Hyperelastic, neo hooke, test data input, poisson={}".format(self._poisson_ratio),
"*Uniaxial Test Data"]
material_lines += stress_strain_lines
elif material_type == "neo_hookean_solid":
c10 = self._modulus / (4 * (1 + self._poisson_ratio))
d1 = 6 * (1 - 2 * self._poisson_ratio) / self._modulus
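            # Abaqus neo-Hookean constants: C10 = mu/2 = E/(4*(1+nu)) and D1 = 2/K = 6*(1-2*nu)/E.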
material_lines += ["*Hyperelastic, neo hooke",
"{}, {}".format(c10, d1)]
elif material_type == "linear":
material_lines += ["*Elastic",
"{}, {}".format(self._modulus, self._poisson_ratio)]
else: material_lines = self.generateMaterial("linear")
return material_lines
def _generateNeoHookeanFitting(self, modulus, strain_range, file_name=""):
"""
Import/Generate stress strain data for neo-Hookean material fitting.
Parameters:
----------
modulus: Float.
The elastic modulus of material.
strain_range: Tuple of floats.
Range for strain interpolation.
file_name (optional): String.
The name of stress strain data definition file.
Default: "".
Returns:
----------
stress_strain_lines: List of strings.
The lines of stress strain data.
"""
if file_name != "": return self.readFile(file_name)
else:
"""
Assumptions of neo-Hookean formulation:
Incompressible (Poisson's ratio = ~0.5, small deformation).
Undergoing uniaxial loading.
Formulation: sigma = 2*C*(stretch - 1/(stretch^2)).
E = 6*C.
"""
strain_data = np.linspace(strain_range[0], strain_range[1], 100)
stretch_data = strain_data + 1.0
stress_data = (self._modulus / 3.0) * (stretch_data - 1.0 / stretch_data**2) # Formulation.
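            # 2*C = E/3, since E = 6*C per the incompressible neo-Hookean assumption above.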
stress_strain_lines = []
for i in range(len(stress_data)):
stress_strain_lines.append("%.6f, %.6f" % (stress_data[i], strain_data[i]))
return stress_strain_lines
def _generateLoadPositions(self, loads_num, fix_indices_list, style="random", input_posi_indices_list=[]):
"""
Randomly generate positions of the load.
Parameters:
----------
loads_num: Int.
Number of loads.
fix_indices_list: List of ints.
Indices of fixed nodes.
style (optional): String.
Indicate how to generate initial load positions.
"random" / "fix":
"random": Randomly generate load positions.
"fix": Use the user input of initial load position indices.
Default: "random".
input_posi_indices_list (optional): List of ints.
User input of initial load positions indices list.
Indexed from 1.
Default: [].
Returns:
----------
loads_posi_indices_list: List of ints.
Picked indices for load application positions.
"""
if style == "random":
loads_posi_indices_list = []
for i in range(loads_num):
while(True):
                    load_posi_index_temp = random.choice(self._outer_surface_nodes_list) # Randomly choose an outer surface node to apply load F(x, y, z). Indexed from 1.
if load_posi_index_temp not in fix_indices_list: break # The randomly generated index cannot be one of the fixed nodes.
loads_posi_indices_list.append(load_posi_index_temp)
return loads_posi_indices_list
elif style == "fix": return input_posi_indices_list
else: return self._generateLoadPositions(loads_num, fix_indices_list)
def _generateLoadValues(self, output_dimension, load_scale, sampling_style="uniform"):
"""
Randomly generate force values for load component definition.
Using function: numpy.random.rand().
Parameters:
----------
output_dimension: Tuple of ints.
The shape of output random array.
Size: 2*1. (dim1, dim2).
load_scale: Tuple of floats.
            Size: 2*1. (min_load, max_load) / (mean, deviation).
sampling_style (optional): String.
Indicating the type of sampling.
"uniform": uniform distribution.
"gaussian": Gaussian distribution.
Default: "uniform".
Returns:
----------
load_result: Array of floats.
Size: output_dimension.
"""
if sampling_style == "uniform":
load_result = (np.random.rand(output_dimension[0], output_dimension[1]) * 2 - 1) * abs(load_scale[1] - load_scale[0])
load_result = load_result.reshape(-1,1)
for index, load_value_temp in enumerate(load_result):
if load_value_temp < 0: load_result[index] -= self._load_scale[0]
else: load_result[index] += self._load_scale[0]
load_result = load_result.reshape(output_dimension[0], output_dimension[1])
elif sampling_style == "gaussian":
mean, deviation = load_scale[0], load_scale[1]
load_result = np.random.normal(mean, deviation, size=output_dimension)
load_result = load_result.reshape(-1,1)
for index, load_value_temp in enumerate(load_result):
if np.random.rand() <= 0.5: load_result[index] *= -1
load_result = load_result.reshape(output_dimension[0], output_dimension[1])
else: load_result = self._generateLoadValues(output_dimension, load_scale)
return load_result
def generateAssembly(self):
"""
Generate assembly definition.
Returns:
----------
The list collection of all sub-definition lists, including:
            assembly_initial: Header of the assembly definition.
instance: The instance definition.
nset_boundary: The definition of BC related node set.
            assembly_end: The endline of assembly definition.
"""
# Generate "self.loads_num" nsets, each of which has 1 node.
if self._isCoupleOn:
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
ref_name_temp = "rf-{}".format(i+1)
ref_nset_name_temp = "rf-{}-nset".format(i+1)
self._rf_name_list.append(ref_name_temp)
self._rf_nset_name_list.append(ref_nset_name_temp)
# Generate assembly node definitions for reference points.
ref_node_list_temp = ["*Node"]
ref_pt_coord_list_temp = [float(item) for item in self._node[load_posi_index_temp].split(',')[1:]]
self.generateNodes(np.array(ref_pt_coord_list_temp).astype(float).reshape(1,-1), ref_node_list_temp,
specified_indices_list=[i+1])
self._ref_nodes_list += copy.deepcopy(ref_node_list_temp)
rf_nset_list_temp = self._findCouplingNodes(load_posi_index_temp, self._coupling_neighbor_layers)
# Generate reference point node sets.
self._load_nsets += self.generateNset([i+1], ref_name_temp)
# Generate coupling constraint node sets.
self._rf_nsets += self.generateNset(rf_nset_list_temp, ref_nset_name_temp,
self._instance_name)
self.generateCoupling()
else:
if self._isLaplacianSmoothingOn:
force_vector_temp = np.zeros(shape=(3*self._surface_nodes_num, 1))
self._laplacian_initial_loads_posi = copy.deepcopy(self._loads_posi_indices_list)
if self._initial_force_component_vector == []:
for load_posi_index_temp in self._loads_posi_indices_list:
force_vector_temp[(load_posi_index_temp-1)*3:load_posi_index_temp*3,:] = self._generateLoadValues((3,1), self._load_params_tuple,
sampling_style=self._load_sampling_style)
else:
for load_posi_index_temp in self._loads_posi_indices_list:
force_vector_temp[(load_posi_index_temp-1)*3:load_posi_index_temp*3,:] = np.array(self._initial_force_component_vector).astype(float).reshape(3,1)
laplacian_matrix, mass_matrix = self.data_mat[self._laplacian_variable_name], self.data_mat[self._massMatrix_variable_name]
laplacian_matrix = self._laplacianMatrixShrink(laplacian_matrix, self._surface_nodes, self.data_mat["faces"], self._outer_surface_regionNum)
force_vector_new = self._laplacianSmoothing(force_vector_temp, laplacian_matrix, mass_matrix, iter_num=self._laplacian_iter_num,
smoothing_rate=self._smoothing_rate, laplacian_force_field=self._user_prescribed_force_field) # Size: (nSurfI x 3)*1. Fix force value: initial_BC_state="fix" (not recommended).
self._laplacian_force_field = force_vector_new.reshape(-1,3)
self._loads_posi_indices_list = copy.deepcopy([(list(force_vector_new).index(item)//3)+1 for item in list(force_vector_new) if item != 0]) # Indexed from 1.
self._loads_posi_indices_list = list(set(self._loads_posi_indices_list))
self._loads_posi_indices_list.sort()
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
load_nset_name_temp = "Load-{}".format(i+1)
self._loads_nset_name_list.append(load_nset_name_temp)
self._load_nsets += self.generateNset([load_posi_index_temp], load_nset_name_temp, self._instance_name)
self._load_nsets += self.generateNset(self._laplacian_initial_loads_posi, "Orig_loads_posi", self._instance_name)
self._load = self.generateLoadSetting(force_list=list(force_vector_new.reshape(-1,1)))
else:
for i, load_posi_index_temp in enumerate(self._loads_posi_indices_list):
load_nset_name_temp = "Load-{}".format(i+1)
self._loads_nset_name_list.append(load_nset_name_temp)
self._load_nsets += self.generateNset([load_posi_index_temp], load_nset_name_temp, self._instance_name)
# Concatenate assembly subparts.
self._nset_boundary = self._nset_boundary + self._load_nsets + self._rf_nsets + self._fix_nset + self._surface_list + self._coupling_list
return (self._assembly_initial + self._instance + self._ref_nodes_list + self._nset_boundary + self._assembly_end)
def generateCoupling(self):
"""
        Generate coupling constraints for concentrated forces application.
"""
for index, rf_name in enumerate(self._rf_nset_name_list):
self._surface_list += ["*Surface, type=NODE, name={}_CNS_, internal".format(rf_name),
"{}, 1.".format(rf_name)]
self._coupling_list += ["*Coupling, constraint name={}, ref node={}, surface={}_CNS_".format(self._rf_name_list[index],
self._rf_name_list[index],
rf_name),
"*{}".format(self._coupling_type)]
def _findCouplingNodes(self, rf_node_ind, neighbor_layers):
"""
Find the immediate neighbors of each specified node index.
Parameters:
----------
rf_node_ind: Int.
The index of target node.
Returns:
----------
rf_nset_list: List of ints (duplicated items removed).
"rf_node_ind"'s corresponding immediate neighbor nodes set.
"""
rf_nset_list, new_nodes_list, searched_nodes_list = [rf_node_ind], [rf_node_ind], []
for j in range(neighbor_layers):
for ind_temp in new_nodes_list:
for i in range(len(self._triangle_nodes_list)):
if ind_temp in self._triangle_nodes_list[i]:
rf_nset_list += copy.deepcopy(self._triangle_nodes_list[i])
else: continue
searched_nodes_list += copy.deepcopy(new_nodes_list)
rf_nset_list = list(set(copy.deepcopy(rf_nset_list)))
new_nodes_list = [ind for ind in rf_nset_list if ind not in searched_nodes_list]
# Avoid assigning same nodes to different coupled node sets.
for ind in rf_nset_list:
if ind in self._coupled_list: rf_nset_list.remove(ind)
else: self._coupled_list.append(ind)
return rf_nset_list
def generateBoundaryCondition_fixAll(self):
"""
Generate fix boundary condition.
Returns:
----------
The list collection of all sub-definition lists, including:
boundary_initial: Header of boundary condition definition.
BC_list_temp: The detailed BC definition of boundary conditions.
"""
BC_list_temp = []
for i in range(6): # 6: 6 DOFs (disp. + rot.); 3: 3 DOFs (disp.).
BC_list_temp.append("{}, {}, {}".format(self._fix_nset_name, i+1, i+1))
return (self._boundary_initial + BC_list_temp)
def generateLoadSetting(self, force_list=[]):
"""
Generate load information.
Returns:
----------
load_list: List of strings.
Definition of concentrated forces.
force_list (optional): List of forces (floats).
Size: loads_num * 3.
Default: [].
"""
load_list = []
if force_list == []:
force_list = list(self._generateLoadValues((self.loads_num*3, 1), self._load_params_tuple, sampling_style=self._load_sampling_style))
force_list = np.array(force_list).astype(float).reshape(-1,3) # 2D Array of floats. Size: self._loads_num * 3.
if self._isCoupleOn:
for j, rf_name in enumerate(self._rf_name_list): # Length: self._loads_num
load_temp = ["*Cload, op=NEW"]
for i in range(force_list.shape[1]): # 3: Three directions.
load_temp.append("{}, {}, {}".format(rf_name, i+1, force_list[j,i]))
load_list += copy.deepcopy(load_temp)
else:
for j, load_name in enumerate(self._loads_nset_name_list): # Length: length of self._loads_nset_name_list.
load_temp = ["*Cload"]
for i in range(force_list.shape[1]): # 3: Three directions.
load_temp.append("{}, {}, {}".format(load_name, i+1, force_list[self._loads_posi_indices_list[j]-1,i]))
load_list += copy.deepcopy(load_temp)
return load_list
def _laplacianMatrixShrink(self, laplacian_matrix, surface_nodes_list, faces_def_matrix, outer_surface_regionNum):
"""
Assign zeros to the DOFs without force value applied.
Parameters:
----------
laplacian_matrix: 2D Array of floats.
The surface's Laplacian for force smoothing.
Size: nSurfI*3 x nSurfI*3.
surface_nodes_list: List of ints.
All indices of nodes on all surfaces.
faces_def_matrix: 2D Array of ints.
The definition of all faces, including the information of surface region number.
outer_surface_regionNum: Int.
The region number of outer surface of the geometry.
Returns:
----------
laplacian_matrix: 2D Array of floats.
Laplacian with zeros assigned to the nodes not on the outer surfaces.
Size: nSurfI*3 x nSurfI*3.
"""
surface_nodes_list = [ind for ind in surface_nodes_list]
outer_surface_nodes_list = self._extractOuterSurfaceNodes(faces_def_matrix, outer_surface_regionNum)
other_surface_nodes_list = [ind for ind in surface_nodes_list if ind not in outer_surface_nodes_list]
other_surface_nodes_list.sort()
for ind in other_surface_nodes_list:
laplacian_matrix[surface_nodes_list.index(ind)*3:(surface_nodes_list.index(ind)+1)*3,:] = 0.0
laplacian_matrix[:,surface_nodes_list.index(ind)*3:(surface_nodes_list.index(ind)+1)*3] = 0.0
return laplacian_matrix
def _laplacianSmoothing(self, force_vector, laplacian_matrix, mass_matrix, iter_num=3, smoothing_rate=1e-4, initial_BC_state="", laplacian_force_field=[]):
"""
Implement laplacian smoothing based on pre-calculated Laplacian matrix.
Formulation: Forward Euler.
F_(n+1) = (I + lambda*massMatrix*Laplacian) * F_n
Parameters:
----------
force_vector: 1D Array of floats.
            With concentrated force values applied at the specified nodes.
Size: (self._surface_nodes_num x 3) * 1.
laplacian_matrix: 2D Array of floats.
Size: (self._surface_nodes_num x 3) * (self._surface_nodes_num x 3).
mass_matrix: 2D Array of floats.
Diagonal matrix.
Size: (self._surface_nodes_num x 3) * (self._surface_nodes_num x 3).
iter_num (optional): Int.
The number of smoothing iterations.
Default: 3.
smoothing_rate (optional): float.
            The coefficient that controls the step size of smoothing.
Default: 1e-4.
initial_BC_state (optional): String.
Indicating whether to "fix" or "decay" the original concentrated force value.
Default: "". Indicating smoothing including the original forces.
laplacian_force_field (optional): List of floats.
The user-prescribed vector of laplacian force field.
Size: self._surface_nodes_num x 3.
Default: [].
Returns:
----------
force_vector_new: 1D Array of floats.
The laplacian-smoothed force vector.
Size: (self._surface_nodes_num x 3) * 1.
"""
if laplacian_force_field == []:
force_vector_new = copy.deepcopy(force_vector)
for i in range(iter_num):
force_vector_new += smoothing_rate * (laplacian_matrix @ force_vector_new) # Without mass matrix.
# force_vector_new += smoothing_rate * (mass_matrix @ laplacian_matrix @ force_vector_new) # With mass matrix (NOT recommended).
if initial_BC_state == "fix":
for j, value in enumerate(force_vector):
if value != 0:
force_vector_new[j] = value
else: force_vector_new = np.array(laplacian_force_field).astype(float).reshape(len(laplacian_force_field),1)
return force_vector_new
def _computeMidPoint(self, ind_1, ind_2):
"""
Compute the mid-point of the edge.
Parameters:
----------
ind_1: Int.
The first index of the node pair. Indexed from 1.
ind_2: Int.
The second index of the node pair. Indexed from 1.
Returns:
----------
ind_mid: Int.
            The index of the mid-point node in self._node. Indexed from 1.
"""
key_string_temp_1, key_string_temp_2 = "{}_{}".format(ind_1, ind_2), "{}_{}".format(ind_2, ind_1)
if key_string_temp_1 in self._new_node_dict.keys(): return self._new_node_dict[key_string_temp_1]
elif key_string_temp_2 in self._new_node_dict.keys(): return self._new_node_dict[key_string_temp_2]
else:
coord_temp_1 = np.array(self._node[ind_1].split(',')[1:]).astype(float).reshape(1,-1)
coord_temp_2 = np.array(self._node[ind_2].split(',')[1:]).astype(float).reshape(1,-1)
coord_temp_mid = (coord_temp_1 + coord_temp_2) / 2.0
coord_mid_list = [str(item) for item in list(coord_temp_mid[0])]
self._node_num = len(self._node)
new_node_def_list_temp = copy.deepcopy([str(self._node_num)])
new_node_def_list_temp += copy.deepcopy(coord_mid_list)
self._node.append(', '.join(new_node_def_list_temp))
self._new_node_list.append(', '.join(new_node_def_list_temp))
self._new_node_dict[key_string_temp_1] = self._node_num
self._new_node_dict[key_string_temp_2] = self._node_num
return self._node_num
def insertNode(self):
"""
Insert one node (at the mid-point) of each edge.
Create C3D10 element structure.
"""
for index, elem_def_string in enumerate(self._elem[1:]):
elem_node_list_temp = [int(ind) for ind in elem_def_string.split(',')[1:]]
# Obtain the mid-point index in order. Assume tetrahedral element (C3D4).
mid_pt_ind_5 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[1])
mid_pt_ind_6 = self._computeMidPoint(elem_node_list_temp[1], elem_node_list_temp[2])
mid_pt_ind_7 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[2])
mid_pt_ind_8 = self._computeMidPoint(elem_node_list_temp[0], elem_node_list_temp[3])
mid_pt_ind_9 = self._computeMidPoint(elem_node_list_temp[1], elem_node_list_temp[3])
mid_pt_ind_10 = self._computeMidPoint(elem_node_list_temp[2], elem_node_list_temp[3])
elem_new_def_list_temp = [str(mid_pt_ind_5),
str(mid_pt_ind_6),
str(mid_pt_ind_7),
str(mid_pt_ind_8),
str(mid_pt_ind_9),
str(mid_pt_ind_10)]
# Redefine the new C3D10 element in order.
elem_def_list_temp = copy.deepcopy(elem_def_string.split(',')) + copy.deepcopy(elem_new_def_list_temp)
elem_def_string_temp = ', '.join(elem_def_list_temp)
self._elem[index+1] = copy.deepcopy(elem_def_string_temp)
def _triangleNodesCollection(self):
"""
Collect all the nodes on each triangle (surface).
Need to be implemented after "self.insertNode()".
"""
for i in range(self._surface_mat.shape[0]):
tri_temp = self._surface_mat[i,:]
# Assuming all triangles on the surface of geometry.
middle_pts_list_temp = [self._computeMidPoint(tri_temp[0], tri_temp[1]),
self._computeMidPoint(tri_temp[0], tri_temp[2]),
self._computeMidPoint(tri_temp[1], tri_temp[2])]
triangle_nodes_list_temp = list(copy.deepcopy(tri_temp)) + copy.deepcopy(middle_pts_list_temp)
self._triangle_nodes_list.append(copy.deepcopy(triangle_nodes_list_temp)) # List of lists of ints.
def nonlinearization(self):
"""
Nonlinearize the linear tetrahedral (CST) element to quadratic tetrahedral element.
"""
self._elem_num = len(self._elem) - 1
self._orig_node_num = len(self._node) - 1
self.insertNode()
self._triangleNodesCollection()
self._node_num = len(self._node) - 1
def saveLog(file_name_list, elapsed_time_list, write_status, data_file_name,
sample_num, fix_indices_list, loads_num, load_sampling_type, load_param_tuple,
material_type, modulus, poisson_ratio, isCoupleOn, isLaplacianSmoothingOn,
coupling_type="", coupling_neighbor_layer_num=1,
laplacian_iter_num=5, laplacian_smoothing_rate=1e-4, write_path="nonlinear_case_generation.log"):
"""
Save the nonlinear cases generation results into .log file.
Parameters:
----------
file_name_list: List of strings.
Names of generated files.
elapsed_time_list: List of floats.
Elapsed time of generation for each input file.
In exact order.
write_status: String.
Indicating the type of input file generation.
"Normal" / "Fast":
"Normal": generate all definitions;
"Fast": generate nodes and elements definition only.
data_file_name: String.
The name of modeling data file.
Format: .mat
sample_num: Int.
Number of generated input files.
fix_indices_list: List of ints.
Indices of fixed points.
Indexed from 1.
loads_num: Int.
The number of concentrated forces.
load_sampling_type: String.
The distribution type for force sampling.
"uniform" / "gaussian":
"uniform": uniform distribution with specified (min, max) range.
"gaussian": gaussian distribution with specified (mean, dev) parameters.
load_param_tuple: tuple of floats.
Parameters of load sampling.
load_sampling_type specific.
material_type: String.
The type of material.
"linear" / "neo_hookean_solid" / "neo_hookean_fitting":
"linear": linear elastic material.
"neo_hookean_solid": neo-Hookean solid following the stain energy formulation.
"neo_hookean_fitting": neo-Hookean solid following the strass-strain curved fitted from user-input strss-strain data.
modulus: Float.
Elastic modulus of the material.
poisson_ratio: Float.
Poisson's ratio of the material.
isCoupleOn: Boolean indicator.
True: using coupling constraint for local force distribution.
False: not using coupling constraint.
isLaplacianSmoothingOn: Boolean indicator.
True: using Laplacian-Beltrami operator matrix to smooth the force distribution.
False: not using Laplacian smoothing.
coupling_type (optional): String.
The type of coupling constraint.
Default: "".
coupling_neighbor_layer_num (optional): Int.
        The number of neighbor layers to which the local force distribution extends.
Default: 1.
laplacian_iter_num (optional): Int.
The number of iteration for laplacian smoothing.
Default: 5.
laplacian_smoothing_rate (optional): Float.
The rate of Laplacian smoothing.
Default: 1e-4.
write_path (optional): String.
The path of to-be-written file.
Default: "nonlinear_case_generation.log".
"""
if isCoupleOn: isCoupleOn_status = "On"
else: isCoupleOn_status = "Off"
if isLaplacianSmoothingOn: isLaplacianSmoothingOn_status = "On"
else: isLaplacianSmoothingOn_status = "Off"
content = ["Data_file_name: {}".format(data_file_name),
"Sample_num = {}".format(sample_num),
"Fixed_indices_list (indexed from 1): {}".format(fix_indices_list),
"Material type: {}".format(material_type),
"Elastic modulus = {} Pa".format(modulus),
"Poisson's ratio = {}".format(poisson_ratio),
"Loads_num = {}".format(loads_num)]
if load_sampling_type == "uniform":
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling range (min, max): {} N".format(load_param_tuple)]
elif load_sampling_type == "gaussian":
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling parameters (mean, dev): {} N".format(load_param_tuple)]
else:
load_sampling_type = "uniform"
content += ["Load sampling type: {}".format(load_sampling_type),
"Load sampling range (min, max): {} N".format(load_param_tuple)]
content += ["Coupling constraint status: {}".format(isCoupleOn_status),
"Laplacian smoothing status: {}".format(isLaplacianSmoothingOn_status)]
if isCoupleOn:
content += ["Coupling type: {}".format(coupling_type),
"Coupling neighbor layer numbers: {}".format(coupling_neighbor_layer_num)]
if isLaplacianSmoothingOn:
content += ["Laplacian smoothing iteration numbers = {}".format(laplacian_iter_num),
"Laplacian smoothing rate = {}".format(laplacian_smoothing_rate)]
content += ["----------------------------------------------------------",
"Input file\t\tExport status\tGeneration status\tElapsed time/s"]
elapsed_time_total = 0
for i, file_name in enumerate(file_name_list):
data_string_temp = "{}\t\t{}\t\tCompleted\t".format(file_name, write_status) + "\t%.8f" % (elapsed_time_list[i])
content.append(data_string_temp)
elapsed_time_total += elapsed_time_list[i]
content += ["----------------------------------------------------------",
"Total elapsed time: {} s".format(elapsed_time_total)]
content = '\n'.join(content)
with open(write_path, 'w') as f: f.write(content)
def main():
abaqus_default_directory = "C:/temp" # Default working directory of Abaqus.
inp_folder = "inp_files"
sample_nums = 1500
data_file_path = "data_aorta.mat"
node_variable_name, elem_variable_name = "NodeI", "EleI"
results_folder_path_stress, results_folder_path_coor = "stress", "coor"
material_type = "neo_hookean_solid" # "linear" / "neo_hookean_fitting" / "neo_hookean_solid".
fix_indices_list = [1148, 1156, 1169] # Specify the node to fix. At least 3. Indexed from 1.
write_status = "Normal" # String. "Normal" / "Fast". "Normal": generate all definitions; "Fast": generate nodes and elements definition only.
# ================================== Force interpolation related variables ================================== #
force_field_mat_name = "force_field_data.mat"
force_interpolation_folder = "inp_interpolation"
isPrescribedForceOn = True # Boolean indicator. True: use prescribed force field; False: no specified force field. Default: False.
force_type = "random" # String. The type of prescribed force field. "interpolated": interpolated force fields; "random": weighted-summed force fields.
eigen_num_force, force_scalar = 20, 0.4 # Float. The scalar of force fields controlling the force magnitude -> deformation magnitude of the tumor in nonlinear solver. Unit: N.
# =========================================================================================================== #
if isPrescribedForceOn:
"""
The pipeline of generating interpolated force fields:
1. Run "nonlinearCasesCreation.py" with 'isPrescribedForceOn = False' firstly.
2. Run "forceInterpolation.py" in the same directory.
            3. Set 'isPrescribedForceOn = True', set 'force_type = "interpolated"', then run "nonlinearCasesCreation.py" again.
Get input files with "*_interpolated.inp" in the folder 'force_interpolation_folder'.
            4. Set 'isPrescribedForceOn = True', set 'force_type = "random"', then run "nonlinearCasesCreation.py" again.
Get input files with "*_random.inp" in the folder 'force_interpolation_folder'.
"""
force_fields = (scipy.io.loadmat(force_field_mat_name)["force_field_interpolated"] if force_type == "interpolated" else
scipy.io.loadmat(force_field_mat_name)["force_field_random"]) # Size: nSurfI*3 x sampleNum. Concatenated as xyzxyz...
sample_nums = force_fields.shape[1]
# Generate input file for Abaqus.
file_name_list, elapsed_time_list, force_field_matrix = [], [], None
for i in range(sample_nums):
start_time = time.time()
if isPrescribedForceOn:
if not os.path.isdir(force_interpolation_folder): os.mkdir(force_interpolation_folder)
file_name_temp = ("{}_interpolated.inp".format(str(i+20001)) if force_type == "interpolated" else
"{}_random.inp".format(str(i+20001)))
write_path = os.path.join(force_interpolation_folder, file_name_temp)
force_field_prescribed_list = list(force_fields[:,i])
inputFile_temp = inputFileGenerator(data_file_path, write_path, material_type,
fix_indices_list, node_variable_name, elem_variable_name,
user_prescribed_force_field=force_field_prescribed_list)
else:
if not os.path.isdir(inp_folder): os.mkdir(inp_folder)
file_name_temp = "{}.inp".format(str(i+20001))
write_path = os.path.join(inp_folder, file_name_temp)
inputFile_temp = inputFileGenerator(data_file_path, write_path, material_type,
fix_indices_list, node_variable_name, elem_variable_name)
inputFile_temp.writeFile(write_status)
end_time = time.time()
elapsed_time = end_time - start_time
file_name_list.append(file_name_temp)
elapsed_time_list.append(elapsed_time)
if i == 0: force_field_matrix = inputFile_temp._laplacian_force_field.reshape(-1,1)
else: force_field_matrix = np.hstack((force_field_matrix, inputFile_temp._laplacian_force_field.reshape(-1,1)))
# ============================ For force visualization only (sample_nums = 1) ============================ #
# print(inputFile_temp._laplacian_initial_loads_posi)
# force_field = {"force_field": inputFile_temp._laplacian_force_field}
# scipy.io.savemat("force_field.mat", force_field)
# ======================================================================================================== #
print("Input_file: ", file_name_temp, "| Status:", write_status, "| Generation: Completed | Time: %.4f s" % (elapsed_time))
saveLog(file_name_list, elapsed_time_list, write_status, data_file_path, sample_nums,
fix_indices_list, inputFile_temp.loads_num, inputFile_temp._load_sampling_style, inputFile_temp._load_params_tuple,
material_type, inputFile_temp._modulus, inputFile_temp._poisson_ratio,
inputFile_temp._isCoupleOn, inputFile_temp._isLaplacianSmoothingOn,
coupling_type=inputFile_temp._coupling_type, coupling_neighbor_layer_num=inputFile_temp._coupling_neighbor_layers,
laplacian_iter_num=inputFile_temp._laplacian_iter_num, laplacian_smoothing_rate=inputFile_temp._smoothing_rate,
write_path="nonlinear_case_generation.log")
if not isPrescribedForceOn: weight_matrix = (2.0 * np.random.rand(eigen_num_force, 3*sample_nums) - 1.0) # Distinct random weights corresponding to each laplacian-force-field.
else: weight_matrix = scipy.io.loadmat(force_field_mat_name)["weight_matrix"] # Distinct random force field for each laplacian-force-field.
mdict = {"fix_indices_list": fix_indices_list,
"orig_data_file_name": data_file_path,
"orig_config_var_name": node_variable_name,
"inp_folder": inp_folder if not isPrescribedForceOn else force_interpolation_folder, # The folder containing input files.
"current_directory": os.getcwd(),
"results_folder_path_stress": results_folder_path_stress,
"results_folder_path_coor": results_folder_path_coor,
"original_node_number": inputFile_temp._orig_node_num,
"couple_region_num": inputFile_temp._couple_region_num,
"force_field_matrix": force_field_matrix, # The force field matrix of all generated samples. Size: nSurfI*3 x sampleNum_total.
"weight_matrix": weight_matrix, "force_scalar_coeff": force_scalar, # The randomly generated matrix for force fields' reconstruction. Size: eigen_num x (3*sample_num).
"eigen_number_force": eigen_num_force, # Int. The eigenmode number of force field reconstruction. (Used only in force field interpolation)
"alpha_indexing_vector": np.zeros(shape=(sample_nums, 1)) if not isPrescribedForceOn else scipy.io.loadmat(force_field_mat_name)["alpha_indexing_vector"]
}
scipy.io.savemat("training_parameters_transfer.mat", mdict)
# np.save(os.path.join(abaqus_default_directory, "training_parameters_transfer.npy"), mdict, fix_imports=True)
# np.savez(os.path.join(abaqus_default_directory, "training_parameters_transfer.npz"),
# fix_indices_list=fix_indices_list,
# orig_data_file_name=data_file_path,
# orig_config_var_name=node_variable_name,
# inp_folder=inp_folder,
# current_directory=os.getcwd(),
# results_folder_path_stress=results_folder_path_stress,
# results_folder_path_coor=results_folder_path_coor)
if __name__ == "__main__":
main()
| [((1187, 21, 1187, 32), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1212, 19, 1212, 30), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1244, 34, 1244, 45), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((393, 20, 393, 51), 'copy.deepcopy', 'copy.deepcopy', ({(393, 34, 393, 50): 'nset_string_temp'}, {}), '(nset_string_temp)', False, 'import copy\n'), ((433, 21, 433, 53), 'copy.deepcopy', 'copy.deepcopy', ({(433, 35, 433, 52): 'elset_string_temp'}, {}), '(elset_string_temp)', False, 'import copy\n'), ((529, 26, 529, 76), 'numpy.linspace', 'np.linspace', ({(529, 38, 529, 53): 'strain_range[0]', (529, 55, 529, 70): 'strain_range[1]', (529, 72, 529, 75): '100'}, {}), '(strain_range[0], strain_range[1], 100)', True, 'import numpy as np\n'), ((761, 35, 761, 64), 'copy.deepcopy', 'copy.deepcopy', ({(761, 49, 761, 63): 'new_nodes_list'}, {}), '(new_nodes_list)', False, 'import copy\n'), ((906, 31, 906, 58), 'copy.deepcopy', 'copy.deepcopy', ({(906, 45, 906, 57): 'force_vector'}, {}), '(force_vector)', False, 'import copy\n'), ((990, 34, 990, 69), 'copy.deepcopy', 'copy.deepcopy', ({(990, 48, 990, 68): 'elem_def_string_temp'}, {}), '(elem_def_string_temp)', False, 'import copy\n'), ((1194, 25, 1194, 81), 'os.path.join', 'os.path.join', ({(1194, 38, 1194, 64): 'force_interpolation_folder', (1194, 66, 1194, 80): 'file_name_temp'}, {}), '(force_interpolation_folder, file_name_temp)', False, 'import os\n'), ((1206, 25, 1206, 65), 'os.path.join', 'os.path.join', ({(1206, 38, 1206, 48): 'inp_folder', (1206, 50, 1206, 64): 'file_name_temp'}, {}), '(inp_folder, file_name_temp)', False, 'import os\n'), ((1252, 38, 1252, 70), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((622, 26, 622, 82), 'numpy.random.normal', 'np.random.normal', (), '', True, 'import numpy as np\n'), ((662, 40, 662, 73), 'copy.deepcopy', 'copy.deepcopy', ({(662, 54, 662, 72): 'ref_node_list_temp'}, {}), '(ref_node_list_temp)', False, 'import copy\n'), ((676, 36, 676, 82), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((678, 53, 678, 97), 'copy.deepcopy', 'copy.deepcopy', ({(678, 67, 678, 96): 'self._loads_posi_indices_list'}, {}), '(self._loads_posi_indices_list)', False, 'import copy\n'), ((818, 29, 818, 53), 'copy.deepcopy', 'copy.deepcopy', ({(818, 43, 818, 52): 'load_temp'}, {}), '(load_temp)', False, 'import copy\n'), ((827, 29, 827, 53), 'copy.deepcopy', 'copy.deepcopy', ({(827, 43, 827, 52): 'load_temp'}, {}), '(load_temp)', False, 'import copy\n'), ((952, 38, 952, 67), 'copy.deepcopy', 'copy.deepcopy', ({(952, 52, 952, 66): 'coord_mid_list'}, {}), '(coord_mid_list)', False, 'import copy\n'), ((987, 77, 987, 114), 'copy.deepcopy', 'copy.deepcopy', ({(987, 91, 987, 113): 'elem_new_def_list_temp'}, {}), '(elem_new_def_list_temp)', False, 'import copy\n'), ((1007, 71, 1007, 106), 'copy.deepcopy', 'copy.deepcopy', ({(1007, 85, 1007, 105): 'middle_pts_list_temp'}, {}), '(middle_pts_list_temp)', False, 'import copy\n'), ((1008, 45, 1008, 84), 'copy.deepcopy', 'copy.deepcopy', ({(1008, 59, 1008, 83): 'triangle_nodes_list_temp'}, {}), '(triangle_nodes_list_temp)', False, 'import copy\n'), ((1190, 19, 1190, 60), 'os.path.isdir', 'os.path.isdir', ({(1190, 33, 1190, 59): 'force_interpolation_folder'}, {}), '(force_interpolation_folder)', False, 'import os\n'), ((1190, 62, 1190, 98), 'os.mkdir', 'os.mkdir', ({(1190, 71, 1190, 97): 'force_interpolation_folder'}, {}), '(force_interpolation_folder)', False, 'import os\n'), ((1203, 19, 
1203, 44), 'os.path.isdir', 'os.path.isdir', ({(1203, 33, 1203, 43): 'inp_folder'}, {}), '(inp_folder)', False, 'import os\n'), ((1203, 46, 1203, 66), 'os.mkdir', 'os.mkdir', ({(1203, 55, 1203, 65): 'inp_folder'}, {}), '(inp_folder)', False, 'import os\n'), ((1237, 55, 1237, 101), 'numpy.random.rand', 'np.random.rand', ({(1237, 70, 1237, 85): 'eigen_num_force', (1237, 87, 1237, 100): '(3 * sample_nums)'}, {}), '(eigen_num_force, 3 * sample_nums)', True, 'import numpy as np\n'), ((389, 28, 389, 59), 'copy.deepcopy', 'copy.deepcopy', ({(389, 42, 389, 58): 'nset_string_temp'}, {}), '(nset_string_temp)', False, 'import copy\n'), ((429, 29, 429, 61), 'copy.deepcopy', 'copy.deepcopy', ({(429, 43, 429, 60): 'elset_string_temp'}, {}), '(elset_string_temp)', False, 'import copy\n'), ((574, 43, 574, 88), 'random.choice', 'random.choice', ({(574, 57, 574, 87): 'self._outer_surface_nodes_list'}, {}), '(self._outer_surface_nodes_list)', False, 'import random\n'), ((762, 36, 762, 63), 'copy.deepcopy', 'copy.deepcopy', ({(762, 50, 762, 62): 'rf_nset_list'}, {}), '(rf_nset_list)', False, 'import copy\n'), ((1007, 44, 1007, 67), 'copy.deepcopy', 'copy.deepcopy', ({(1007, 58, 1007, 66): 'tri_temp'}, {}), '(tri_temp)', False, 'import copy\n'), ((611, 27, 611, 83), 'numpy.random.rand', 'np.random.rand', ({(611, 42, 611, 61): 'output_dimension[0]', (611, 63, 611, 82): 'output_dimension[1]'}, {}), '(output_dimension[0], output_dimension[1])', True, 'import numpy as np\n'), ((626, 19, 626, 35), 'numpy.random.rand', 'np.random.rand', ({}, {}), '()', True, 'import numpy as np\n'), ((758, 40, 758, 83), 'copy.deepcopy', 'copy.deepcopy', ({(758, 54, 758, 82): 'self._triangle_nodes_list[i]'}, {}), '(self._triangle_nodes_list[i])', False, 'import copy\n'), ((809, 21, 809, 41), 'numpy.array', 'np.array', ({(809, 30, 809, 40): 'force_list'}, {}), '(force_list)', True, 'import numpy as np\n'), ((916, 33, 916, 64), 'numpy.array', 'np.array', ({(916, 42, 916, 63): 'laplacian_force_field'}, {}), '(laplacian_force_field)', True, 'import numpy as np\n'), ((660, 35, 660, 67), 'numpy.array', 'np.array', ({(660, 44, 660, 66): 'ref_pt_coord_list_temp'}, {}), '(ref_pt_coord_list_temp)', True, 'import numpy as np\n'), ((686, 97, 686, 143), 'numpy.array', 'np.array', ({(686, 106, 686, 142): 'self._initial_force_component_vector'}, {}), '(self._initial_force_component_vector)', True, 'import numpy as np\n')] |
dongboyan77/quay | data/cache/test/test_cache.py | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | import pytest
from mock import patch
from data.cache import InMemoryDataModelCache, NoopDataModelCache, MemcachedModelCache
from data.cache.cache_key import CacheKey
class MockClient(object):
def __init__(self, server, **kwargs):
self.data = {}
def get(self, key, default=None):
return self.data.get(key, default)
def set(self, key, value, expire=None):
self.data[key] = value
@pytest.mark.parametrize("cache_type", [(NoopDataModelCache), (InMemoryDataModelCache),])
def test_caching(cache_type):
key = CacheKey("foo", "60m")
cache = cache_type()
# Perform two retrievals, and make sure both return.
assert cache.retrieve(key, lambda: {"a": 1234}) == {"a": 1234}
assert cache.retrieve(key, lambda: {"a": 1234}) == {"a": 1234}
def test_memcache():
key = CacheKey("foo", "60m")
with patch("data.cache.impl.Client", MockClient):
cache = MemcachedModelCache(("127.0.0.1", "-1"))
assert cache.retrieve(key, lambda: {"a": 1234}) == {"a": 1234}
assert cache.retrieve(key, lambda: {"a": 1234}) == {"a": 1234}
def test_memcache_should_cache():
key = CacheKey("foo", None)
def sc(value):
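        # Cache the computed value only when its "a" field is not 1234.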
return value["a"] != 1234
with patch("data.cache.impl.Client", MockClient):
cache = MemcachedModelCache(("127.0.0.1", "-1"))
assert cache.retrieve(key, lambda: {"a": 1234}, should_cache=sc) == {"a": 1234}
# Ensure not cached since it was `1234`.
assert cache._get_client().get(key.key) is None
# Ensure cached.
assert cache.retrieve(key, lambda: {"a": 2345}, should_cache=sc) == {"a": 2345}
assert cache._get_client().get(key.key) is not None
assert cache.retrieve(key, lambda: {"a": 2345}, should_cache=sc) == {"a": 2345}
| [((20, 1, 20, 89), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(20, 25, 20, 37): '"""cache_type"""', (20, 39, 20, 88): '[NoopDataModelCache, InMemoryDataModelCache]'}, {}), "('cache_type', [NoopDataModelCache,\n InMemoryDataModelCache])", False, 'import pytest\n'), ((22, 10, 22, 32), 'data.cache.cache_key.CacheKey', 'CacheKey', ({(22, 19, 22, 24): '"""foo"""', (22, 26, 22, 31): '"""60m"""'}, {}), "('foo', '60m')", False, 'from data.cache.cache_key import CacheKey\n'), ((31, 10, 31, 32), 'data.cache.cache_key.CacheKey', 'CacheKey', ({(31, 19, 31, 24): '"""foo"""', (31, 26, 31, 31): '"""60m"""'}, {}), "('foo', '60m')", False, 'from data.cache.cache_key import CacheKey\n'), ((39, 10, 39, 31), 'data.cache.cache_key.CacheKey', 'CacheKey', ({(39, 19, 39, 24): '"""foo"""', (39, 26, 39, 30): 'None'}, {}), "('foo', None)", False, 'from data.cache.cache_key import CacheKey\n'), ((32, 9, 32, 52), 'mock.patch', 'patch', ({(32, 15, 32, 39): '"""data.cache.impl.Client"""', (32, 41, 32, 51): 'MockClient'}, {}), "('data.cache.impl.Client', MockClient)", False, 'from mock import patch\n'), ((33, 16, 33, 56), 'data.cache.MemcachedModelCache', 'MemcachedModelCache', ({(33, 36, 33, 55): "('127.0.0.1', '-1')"}, {}), "(('127.0.0.1', '-1'))", False, 'from data.cache import InMemoryDataModelCache, NoopDataModelCache, MemcachedModelCache\n'), ((44, 9, 44, 52), 'mock.patch', 'patch', ({(44, 15, 44, 39): '"""data.cache.impl.Client"""', (44, 41, 44, 51): 'MockClient'}, {}), "('data.cache.impl.Client', MockClient)", False, 'from mock import patch\n'), ((45, 16, 45, 56), 'data.cache.MemcachedModelCache', 'MemcachedModelCache', ({(45, 36, 45, 55): "('127.0.0.1', '-1')"}, {}), "(('127.0.0.1', '-1'))", False, 'from data.cache import InMemoryDataModelCache, NoopDataModelCache, MemcachedModelCache\n')] |
mazmat-panw/content | Packs/HealthCheck/Scripts/HealthCheckIncidentsCreatedMonthly/HealthCheckIncidentsCreatedMonthly.py | 024a65c1dea2548e2637a9cbbe54966e9e34a722 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
ctx = demisto.context()
dataFromCtx = ctx.get("widgets")
if not dataFromCtx:
incident = demisto.incidents()[0]
accountName = incident.get('account')
accountName = f"acc_{accountName}" if accountName != "" else ""
stats = demisto.executeCommand(
"demisto-api-post",
{
"uri": f"{accountName}/statistics/widgets/query",
"body": {
"size": 13,
"dataType": "incidents",
"query": "",
"dateRange": {
"period": {
"byFrom": "months",
"fromValue": 12
}
},
"widgetType": "line",
"params": {
"groupBy": [
"occurred(m)",
"null"
],
"timeFrame": "months"
},
},
})
res = stats[0]["Contents"]["response"]
buildNumber = demisto.executeCommand("DemistoVersion", {})[0]['Contents']['DemistoVersion']['buildNumber']
buildNumber = f'{buildNumber}' if buildNumber != "REPLACE_THIS_WITH_CI_BUILD_NUM" else "618658"
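    # Builds 618657 and later support the line widget; older builds fall back to the bar widget below.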
if int(buildNumber) >= 618657:
# Line graph:
data = {
"Type": 17,
"ContentsFormat": "line",
"Contents": {
"stats": res,
"params": {
"timeFrame": "months"
}
}
}
else:
# Bar graph:
output = []
for entry in res:
output.append({"name": entry["name"], "data": entry["data"]})
data = {
"Type": 17,
"ContentsFormat": "bar",
"Contents": {
"stats": output,
"params": {
"layout": "horizontal"
}
}
}
demisto.results(data)
else:
data = {
"Type": 17,
"ContentsFormat": "line",
"Contents": {
"stats": dataFromCtx['IncidentsCreatedMonthly'],
"params": {
"timeFrame": "months"
}
}
}
demisto.results(data)
| [((4, 6, 4, 23), 'demistomock.context', 'demisto.context', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((11, 12, 34, 10), 'demistomock.executeCommand', 'demisto.executeCommand', ({(12, 8, 12, 26): '"""demisto-api-post"""', (13, 8, 34, 9): "{'uri': f'{accountName}/statistics/widgets/query', 'body': {'size': 13,\n 'dataType': 'incidents', 'query': '', 'dateRange': {'period': {'byFrom':\n 'months', 'fromValue': 12}}, 'widgetType': 'line', 'params': {'groupBy':\n ['occurred(m)', 'null'], 'timeFrame': 'months'}}}"}, {}), "('demisto-api-post', {'uri':\n f'{accountName}/statistics/widgets/query', 'body': {'size': 13,\n 'dataType': 'incidents', 'query': '', 'dateRange': {'period': {'byFrom':\n 'months', 'fromValue': 12}}, 'widgetType': 'line', 'params': {'groupBy':\n ['occurred(m)', 'null'], 'timeFrame': 'months'}}})", True, 'import demistomock as demisto\n'), ((71, 4, 71, 25), 'demistomock.results', 'demisto.results', ({(71, 20, 71, 24): 'data'}, {}), '(data)', True, 'import demistomock as demisto\n'), ((83, 4, 83, 25), 'demistomock.results', 'demisto.results', ({(83, 20, 83, 24): 'data'}, {}), '(data)', True, 'import demistomock as demisto\n'), ((7, 15, 7, 34), 'demistomock.incidents', 'demisto.incidents', ({}, {}), '()', True, 'import demistomock as demisto\n'), ((38, 18, 38, 62), 'demistomock.executeCommand', 'demisto.executeCommand', ({(38, 41, 38, 57): '"""DemistoVersion"""', (38, 59, 38, 61): '{}'}, {}), "('DemistoVersion', {})", True, 'import demistomock as demisto\n')] |
qzlydao/Bert_Sentiment_Analysis | Bert_training.py | 2da2d0c6da2cdb55f37ff0a7e95f0ea4876b2d61 | from torch.utils.data import DataLoader
from dataset.wiki_dataset import BERTDataset
from models.bert_model import *
from tqdm import tqdm
import numpy as np
import pandas as pd
import os
config = {}
config['train_corpus_path'] = './corpus/train_wiki.txt'
config['test_corpus_path'] = './corpus/test_wiki.txt'
config['word2idx_path'] = './corpus/bert_word2idx_extend.json'
config['output_path'] = './output_wiki_bert'
config['batch_size'] = 1
config['max_seq_len'] = 200
config['vocab_size'] = 32162
config['lr'] = 2e-6
config['num_workers'] = 0
class Pretrainer:
def __init__(self, bert_model,
vocab_size, max_seq_len,
batch_size, lr, with_cuda=True):
        # Vocabulary size; note that the actual number of characters (words) = vocab_size - 20,
        # because the first 20 tokens are reserved for special purposes such as padding
self.vocab_size = vocab_size
self.batch_size = batch_size
self.lr = lr
cuda_condition = torch.cuda.is_available() and with_cuda
self.device = torch.device('cuda:0' if cuda_condition else 'cpu')
        # Maximum length of a single sentence
self.max_seq_len = max_seq_len
        # Initialize the hyperparameter configuration
bertconfig = BertConfig(vocab_size=config['vocab_size'])
        # Initialize the BERT model
self.bert_model = bert_model(config=bertconfig)
self.bert_model.to(self.device)
        # Initialize the training dataset
train_dataset = BERTDataset(corpus_path=config['train_corpus_path'],
word2idx_path=config['word2idx_path'],
seq_len=self.max_seq_len,
hidden_dim=bertconfig.hidden_size,
on_memory=False)
        # Initialize the training dataloader
self.train_dataloader = DataLoader(train_dataset,
batch_size=config['batch_size'],
num_workers=config['num_workers'],
collate_fn=lambda x:x)
        # Initialize the test dataset
test_dataset = BERTDataset(corpus_path=config['test_corpus_path'],
word2idx_path=config['word2idx_path'],
seq_len=self.max_seq_len,
hidden_dim=bertconfig.hidden_size,
on_memory=True)
        # Initialize the test dataloader
self.test_dataloader = DataLoader(test_dataset, batch_size=self.batch_size,
num_workers=config['num_workers'],
collate_fn=lambda x: x)
        # Initialize positional_encoding [max_seq_len, hidden_size]
self.positional_enc = self.init_positional_encoding(hidden_dim=bertconfig.hidden_size,
max_seq_len=self.max_seq_len)
        # Expand positional_encoding to shape [1, max_seq_len, hidden_size]
self.positional_enc = torch.unsqueeze(self.positional_enc, dim=0)
        # Collect the parameters that need optimization and pass them to the optimizer
optim_parameters = list(self.bert_model.parameters())
self.optimizer = torch.optim.Adam(optim_parameters, lr=self.lr)
print('Total Parameters:', sum(p.nelement() for p in self.bert_model.parameters()))
def init_positional_encoding(self, hidden_dim, max_seq_len):
position_enc = np.array([
[pos / np.power(10000, 2 * i / hidden_dim) for i in range(hidden_dim)]
if pos != 0 else np.zeros(hidden_dim) for pos in range(max_seq_len)
])
# dim=2i
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])
# dim=2i+1
        position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2])  # cos on odd dims per the standard sinusoidal formula
        # todo normalization, why? Divide each row of the positional embedding by its norm
        denominator = np.sqrt(np.sum(position_enc**2, axis=1, keepdims=True))  # used as the denominator
position_enc /= (denominator + 1e-8)
position_enc = torch.from_numpy(position_enc).type(torch.FloatTensor)
return position_enc
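    # Added note (hedged, not part of the original implementation): the encoding above
    # follows the standard Transformer formula
    #   PE(pos, 2i)   = sin(pos / 10000**(2i / hidden_dim))
    #   PE(pos, 2i+1) = cos(pos / 10000**(2i / hidden_dim))
    # The extra division by each row's L2 norm is specific to this implementation;
    # the original Transformer paper does not normalize the rows.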
def test(self, epoch, df_path='./output_wiki_bert/df_log.pickle'):
self.bert_model.eval()
with torch.no_grad():
return self.iteration(epoch, self.test_dataloader, train=False, df_path=df_path)
def load_model(self, model, dir_path='./output'):
        # Load the model
checkpoint_dir = self.find_most_recent_state_dict(dir_path)
checkpoint = torch.load(checkpoint_dir)
        # todo where was this key saved?
model.load_state_dict(checkpoint['model_state_dict'], strict=False)
torch.cuda.empty_cache()
model.to(self.device)
print('{} loaded for training!'.format(checkpoint_dir))
def train(self, epoch, df_path='./output_wiki_bert/df_log.pickle'):
self.bert_model.train()
self.iteration(epoch, self.train_dataloader, train=True, df_path=df_path)
def compute_loss(self, preditions, labels, num_class=2, ignore_index=None):
if ignore_index is None:
loss_func = CrossEntropyLoss()
else:
loss_func = CrossEntropyLoss(ignore_index=ignore_index)
return loss_func(preditions.view(-1, num_class), labels.view(-1))
def get_mlm_accuracy(self, predictions, labels):
# predictions [batch_size, seq_len, vocab_size]
predictions = torch.argmax(predictions, dim=-1, keepdim=False) # predictions: [batch_size, seq_len]
# labels: [batch_size, seq_len]
        mask = (labels > 0)  # only consider the tokens that were MASKed
        # number of correct predictions
pred_correct = torch.sum((predictions == labels) * mask).float()
# accuracy
mlm_accuracy = pred_correct / (torch.sum(mask).float() + 1e-8)
return mlm_accuracy.item()
def padding(self, output_dic_list):
        # todo format of output_dic_list
# [batch_size, seq_len, embed_dim]
bert_input = [i['bert_input'] for i in output_dic_list]
bert_label = [i['bert_label'] for i in output_dic_list]
segment_label = [i['segment_label'] for i in output_dic_list]
# padding
bert_input = torch.nn.utils.rnn.pad_sequence(bert_input, batch_first=True)
bert_label = torch.nn.utils.rnn.pad_sequence(bert_label, batch_first=True)
segment_label = torch.nn.utils.rnn.pad_sequence(segment_label, batch_first=True)
# [batch_size]
is_next = torch.cat([i['is_next'] for i in output_dic_list])
return {
'bert_input': bert_input,
'bert_label': bert_label,
'segment_label': segment_label,
'is_next': is_next
}
def find_most_recent_state_dict(self, dir_path):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
dic_list = [i for i in os.listdir(dir_path)]
if len(dic_list) == 0:
raise FileNotFoundError('can not find any state dict in {}'.format(dir_path))
        # todo when was the model saved?
dic_list = [i for i in dic_list if 'model' in i]
dic_list = sorted(dic_list, key=lambda k: int(k.split('.')[-1]))
return dir_path + '/' + dic_list[-1]
def iteration(self, epoch, data_loader, train=True, df_path='./output_wiki_bert/df_log.pickle'):
if not os.path.isfile(df_path) and epoch != 0:
raise RuntimeError("log DataFrame path not found and can't create a new one because we're not training from scratch!")
if not os.path.isfile(df_path) and epoch == 0:
df = pd.DataFrame(columns=['epoch', 'train_next_sen_loss', 'train_mlm_loss',
'train_next_sen_acc', 'train_mlm_acc',
'test_next_sen_loss', 'test_mlm_loss',
'test_next_sen_acc', 'test_mlm_acc'])
df.to_pickle(df_path)
print('log DataFrame created!')
str_code = 'train' if train else 'test'
        # Set up the progress bar and get the iterator object
data_iter = tqdm(enumerate(data_loader),
desc='EP_%s:%d' % (str_code, epoch),
total=len(data_loader),
bar_format='{l_bar}{r_bar}')
total_next_sen_loss = 0
total_mlm_loss = 0
total_next_sen_acc = 0
total_mlm_acc = 0
total_element = 0
for i, data in data_iter:
data = self.padding(data)
# 0. batch_data will be sent into the device
data = {key: value.to(self.device) for key, value in data.items()}
            # todo dimensions of data['bert_input']
positional_enc = self.positional_enc[:, :data['bert_input'].size()[-1], :].to(self.device)
# 1. forward the next_sentence_prediction and masked_lm_model
# mlm_preds: [batch_size, seq_len, vocab_size]
# next_sen_preds: [batch_size, seq_len]
mlm_preds, next_sen_preds = self.bert_model.forward(input_ids=data['bert_input'],
positional_enc=positional_enc,
token_type_ids=data['segment_label'])
mlm_acc = self.get_mlm_accuracy(mlm_preds, data['bert_label'])
next_sen_acc = next_sen_preds.argmax(dim=-1, keepdim=False).eq(data['is_next']).sum().item()
mlm_loss = self.compute_loss(mlm_preds, data['bert_label'], self.vocab_size, ignore_index=0)
next_sen_loss = self.compute_loss(next_sen_preds, data['is_next'])
            # Joint training of the two tasks
loss = mlm_loss + next_sen_loss
            # 3. Backpropagation and gradient update
if train:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
total_next_sen_loss += next_sen_loss.item()
total_mlm_loss += mlm_loss.item()
total_next_sen_acc += next_sen_acc
total_element += data['is_next'].nelement()
total_mlm_acc += mlm_acc
if train:
log_dict = {
'epoch': epoch,
'train_next_sen_loss': total_next_sen_loss / (i + 1),
'train_mlm_loss': total_mlm_loss / (i + 1),
'train_next_sen_acc': total_next_sen_acc / total_element,
'train_mlm_acc': total_mlm_acc / (i + 1),
'test_next_sen_loss': 0, 'test_mlm_loss':0,
'test_next_sen_acc':0, 'test_mlm_acc':0
}
else:
log_dict = {
'epoch': epoch,
'test_next_sen_loss': total_next_sen_loss / (i + 1),
'test_mlm_loss': total_mlm_loss / (i + 1),
'test_next_sen_acc': total_next_sen_acc / total_element,
'test_mlm_acc': total_mlm_acc / (i + 1),
'train_next_sen_loss': 0, 'train_mlm_loss': 0,
'train_next_sen_acc': 0, 'train_mlm_acc': 0
}
if i % 10 == 0:
data_iter.write(str({k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}))
if train:
df = pd.read_pickle(df_path)
                    # Append the log info to df
df = df.append([log_dict])
                    # Reset the index
df.reset_index(inplace=True, drop=True)
                    # Save to local disk
df.to_pickle(df_path)
else:
log_dict = {k: v for k, v in log_dict.items() if v != 0 and k != 'epoch'}
df = pd.read_pickle(df_path)
df.reset_index(inplace=True, drop=True)
for k, v in log_dict.items():
df.at[epoch, k] = v
df.to_pickle(df_path)
return float(log_dict['test_next_sen_loss']) + float(log_dict['test_mlm_loss'])
def save_state_dict(self, model, epoch, dir_path='./output', file_path='bert.model'):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
save_path = dir_path + '/' + file_path + '.epoch.{}'.format(str(epoch))
model.to('cpu')
torch.save({'model_state_dict': model.state_dict()}, save_path)
print('{} saved!'.format(save_path))
model.to(self.device)
if __name__ == '__main__':
def init_trainer(dynamic_lr, load_model=False):
trainer = Pretrainer(BertForPreTraining,
vocab_size=config['vocab_size'],
max_seq_len=config['max_seq_len'],
batch_size=config['batch_size'],
lr=dynamic_lr,
with_cuda=True)
if load_model:
trainer.load_model(trainer.bert_model, dir_path=config['output_path'])
return trainer
start_epoch = 3
train_epoches = 1
trainer = init_trainer(config['lr'], load_model=True)
all_loss = []
threshold = 0
patient = 10
best_f1 = 0
dynamic_lr = config['lr']
    # todo why does start_epoch begin at 3?
for epoch in range(start_epoch, start_epoch + train_epoches):
print('train with learning rate {}'.format(str(dynamic_lr)))
trainer.train(epoch)
trainer.save_state_dict(trainer.bert_model, epoch, dir_path=config['output_path'],
file_path='bert.model')
trainer.test(epoch)
| [((42, 24, 46, 52), 'dataset.wiki_dataset.BERTDataset', 'BERTDataset', (), '', False, 'from dataset.wiki_dataset import BERTDataset\n'), ((48, 32, 51, 65), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((53, 23, 57, 50), 'dataset.wiki_dataset.BERTDataset', 'BERTDataset', (), '', False, 'from dataset.wiki_dataset import BERTDataset\n'), ((59, 31, 61, 65), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((82, 33, 82, 63), 'numpy.sin', 'np.sin', ({(82, 40, 82, 62): 'position_enc[1:, 0::2]'}, {}), '(position_enc[1:, 0::2])', True, 'import numpy as np\n'), ((84, 33, 84, 63), 'numpy.sin', 'np.sin', ({(84, 40, 84, 62): 'position_enc[1:, 1::2]'}, {}), '(position_enc[1:, 1::2])', True, 'import numpy as np\n'), ((86, 30, 86, 76), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((150, 15, 150, 39), 'os.path.exists', 'os.path.exists', ({(150, 30, 150, 38): 'dir_path'}, {}), '(dir_path)', False, 'import os\n'), ((151, 12, 151, 30), 'os.mkdir', 'os.mkdir', ({(151, 21, 151, 29): 'dir_path'}, {}), '(dir_path)', False, 'import os\n'), ((165, 17, 168, 76), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((244, 17, 244, 40), 'pandas.read_pickle', 'pd.read_pickle', ({(244, 32, 244, 39): 'df_path'}, {}), '(df_path)', True, 'import pandas as pd\n'), ((253, 17, 253, 40), 'pandas.read_pickle', 'pd.read_pickle', ({(253, 32, 253, 39): 'df_path'}, {}), '(df_path)', True, 'import pandas as pd\n'), ((262, 15, 262, 39), 'os.path.exists', 'os.path.exists', ({(262, 30, 262, 38): 'dir_path'}, {}), '(dir_path)', False, 'import os\n'), ((263, 12, 263, 30), 'os.mkdir', 'os.mkdir', ({(263, 21, 263, 29): 'dir_path'}, {}), '(dir_path)', False, 'import os\n'), ((152, 31, 152, 51), 'os.listdir', 'os.listdir', ({(152, 42, 152, 50): 'dir_path'}, {}), '(dir_path)', False, 'import os\n'), ((161, 15, 161, 38), 'os.path.isfile', 'os.path.isfile', ({(161, 30, 161, 37): 'df_path'}, {}), '(df_path)', False, 'import os\n'), ((164, 15, 164, 38), 'os.path.isfile', 'os.path.isfile', ({(164, 30, 164, 37): 'df_path'}, {}), '(df_path)', False, 'import os\n'), ((78, 29, 78, 49), 'numpy.zeros', 'np.zeros', ({(78, 38, 78, 48): 'hidden_dim'}, {}), '(hidden_dim)', True, 'import numpy as np\n'), ((77, 19, 77, 54), 'numpy.power', 'np.power', ({(77, 28, 77, 33): '10000', (77, 35, 77, 53): '2 * i / hidden_dim'}, {}), '(10000, 2 * i / hidden_dim)', True, 'import numpy as np\n')] |
appliedml85/triton | python/triton/language/random.py | 8bedcce9befbbe95d8fe0a082718edc4050e2831 | import triton
import triton.language as tl
# Notes
# 1. triton doesn't support uint32, so we use int32 instead and benefit from the fact that two's complement operations are equivalent to uint operations.
# 2. multiply_low_high is currently inefficient.
# 3. Even though technically philox sampling outputs ints, in many places we pretend they were actually uints, e.g. uint32_to_uniform_float
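# Added illustrative note (hedged): the int32 substitution in note 1 works because addition,
# subtraction, XOR, shifts and the low 32 bits of multiplication are bit-identical under
# two's complement, e.g. (a * b) mod 2**32 yields the same bit pattern whether a and b are
# interpreted as signed or unsigned 32-bit integers.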
@triton.jit
def PHILOX_KEY_A():
# 0x9E3779B9
return -1640531527
@triton.jit
def PHILOX_KEY_B():
# 0xBB67AE85
return -1150833019
@triton.jit
def PHILOX_ROUND_A():
# 0xD2511F53
return -766435501
@triton.jit
def PHILOX_ROUND_B():
# 0xCD9E8D57
return -845247145
@triton.jit
def hacky_to_uint64(x):
return ((x >> 1).to(tl.int64) << 1) + (x & 1).to(tl.int64)
@triton.jit
def multiply_low_high(a, b):
return (
a * b,
((hacky_to_uint64(a) * hacky_to_uint64(b)) >> 32).to(tl.int32)
)
@triton.jit
def single_round(c0, c1, c2, c3, k0, k1):
A = PHILOX_ROUND_A()
B = PHILOX_ROUND_B()
lo0, hi0 = multiply_low_high(A, c0)
lo1, hi1 = multiply_low_high(B, c2)
return (
hi1 ^ c1 ^ k0,
lo1,
hi0 ^ c3 ^ k1,
lo0,
)
@triton.jit
def raise_key(k0, k1):
return (
k0 + PHILOX_KEY_A(),
k1 + PHILOX_KEY_B(),
)
@triton.jit
def philox_f(c0, c1, c2, c3, k0, k1):
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
k0, k1 = raise_key(k0, k1)
c0, c1, c2, c3 = single_round(c0, c1, c2, c3, k0, k1)
return c0, c1, c2, c3
@triton.jit
def uint32_to_uniform_float(x):
"""
Numerically stable function to convert a random integer into a random float uniformly sampled in [0, 1).
    This was originally designed for uint32, but it works with int32 too as long as the int32 values
    uniformly cover all the possible bit patterns.
"""
mantissa = x & 0x7fffff
exp = 127
res = mantissa | (exp << 23)
return res.to(tl.float32, bitcast=True) - 1.0
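# Added explanatory sketch (hedged, not part of the original source): forcing the exponent
# to 127 while randomizing the 23 mantissa bits produces a float in [1.0, 2.0), so the
# subtraction above maps it to [0.0, 1.0). The same trick in NumPy would look roughly like:
#
#   import numpy as np
#   def uint32_to_uniform_float_np(x):
#       bits = (x.astype(np.uint32) & np.uint32(0x7FFFFF)) | np.uint32(0x3F800000)
#       return bits.view(np.float32) - 1.0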
@triton.jit
def pair_uniform_to_normal(u1, u2):
"""Box-Muller transform"""
u1 = tl.maximum(1.0e-7, u1)
th = 6.283185307179586 * u2
r = tl.sqrt(-2.0 * tl.log(u1))
return r * tl.cos(th), r * tl.sin(th)
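# Added note (hedged): this is the standard Box-Muller transform,
#   z0 = sqrt(-2 * ln(u1)) * cos(2 * pi * u2)
#   z1 = sqrt(-2 * ln(u1)) * sin(2 * pi * u2)
# and the clamp of u1 to at least 1e-7 above only guards against log(0).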
@triton.jit
def randint4x(seed, offset):
"""
Given a :code:`seed` scalar and an :code:`offset` block, returns four
blocks of random :code:`int32`.
This is the maximally efficient entry point
to Triton's Philox pseudo-random number generator.
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
z = 0
return philox_f(offset, z, z, z, seed, z)
@triton.jit
def randint(seed, offset):
"""
Given a :code:`seed` scalar and an :code:`offset` block, returns a single
block of random :code:`int32`.
If you need multiple streams of random numbers,
using `randint4x` is likely to be faster than calling `randint` 4 times.
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
ret, _, _, _ = randint4x(seed, offset)
return ret
@triton.jit
def rand(seed, offset):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
returns a block of random :code:`float32` in :math:`U(0, 1)`
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
source = randint(seed, offset)
return uint32_to_uniform_float(source)
@triton.jit
def randn(seed, offset):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
returns a block of random :code:`float32` in :math:`\mathcal{N}(0, 1)`
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
i1, i2, _, _ = randint4x(seed, offset)
u1 = uint32_to_uniform_float(i1)
u2 = uint32_to_uniform_float(i2)
n1, _ = pair_uniform_to_normal(u1, u2)
return n1
@triton.jit
def rand4x(seed, offsets):
"""
Given a :code:`seed` scalar and an :code:`offsets` block,
    returns 4 blocks of random :code:`float32` in :math:`U(0, 1)`
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
i1, i2, i3, i4 = randint4x(seed, offsets)
u1 = uint32_to_uniform_float(i1)
u2 = uint32_to_uniform_float(i2)
u3 = uint32_to_uniform_float(i3)
u4 = uint32_to_uniform_float(i4)
return u1, u2, u3, u4
@triton.jit
def randn4x(seed, offset):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
    returns 4 blocks of random :code:`float32` in :math:`\mathcal{N}(0, 1)`
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
u1, u2, u3, u4 = rand4x(seed, offset)
n1, n2 = pair_uniform_to_normal(u1, u2)
n3, n4 = pair_uniform_to_normal(u3, u4)
return n1, n2, n3, n4
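# Added usage sketch (hedged, not part of the original module): inside a Triton kernel these
# helpers are typically combined with tl.program_id / tl.arange so every element gets its own
# offset. A dropout-style kernel could look roughly like the following (BLOCK, p_drop and the
# pointer arguments are illustrative assumptions):
#
#   @triton.jit
#   def _dropout_kernel(x_ptr, out_ptr, n, seed, p_drop, BLOCK: tl.constexpr):
#       offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
#       valid = offs < n
#       x = tl.load(x_ptr + offs, mask=valid)
#       u = rand(seed, offs)  # uniform sample in [0, 1) per element
#       keep = u > p_drop
#       tl.store(out_ptr + offs, tl.where(keep, x / (1 - p_drop), 0.0), mask=valid)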
| [((112, 9, 112, 31), 'triton.language.maximum', 'tl.maximum', ({(112, 20, 112, 26): '1e-07', (112, 28, 112, 30): 'u1'}, {}), '(1e-07, u1)', True, 'import triton.language as tl\n'), ((114, 23, 114, 33), 'triton.language.log', 'tl.log', ({(114, 30, 114, 32): 'u1'}, {}), '(u1)', True, 'import triton.language as tl\n'), ((115, 15, 115, 25), 'triton.language.cos', 'tl.cos', ({(115, 22, 115, 24): 'th'}, {}), '(th)', True, 'import triton.language as tl\n'), ((115, 31, 115, 41), 'triton.language.sin', 'tl.sin', ({(115, 38, 115, 40): 'th'}, {}), '(th)', True, 'import triton.language as tl\n')] |
kensho-technologies/pyctcdecode | pyctcdecode/__init__.py | c33f94bce283ea9af79d30e2b815e3bf34a137c9 | # Copyright 2021-present Kensho Technologies, LLC.
from .alphabet import Alphabet # noqa
from .decoder import BeamSearchDecoderCTC, build_ctcdecoder # noqa
from .language_model import LanguageModel # noqa
__package_name__ = "pyctcdecode"
__version__ = "0.3.0"
| [] |
marky1991/Legend-of-Wumpus | wumpus/start_server.py | b53f4a520cea274ddb4c40c6ab4f42a68008896f | from wumpus.server import Server
from circuits import Debugger
s = Server("0.0.0.0", 50551) + Debugger()
s.run()
import sys
sys.exit(1)
| [((7, 0, 7, 11), 'sys.exit', 'sys.exit', ({(7, 9, 7, 10): '(1)'}, {}), '(1)', False, 'import sys\n'), ((4, 4, 4, 28), 'wumpus.server.Server', 'Server', ({(4, 11, 4, 20): '"""0.0.0.0"""', (4, 22, 4, 27): '(50551)'}, {}), "('0.0.0.0', 50551)", False, 'from wumpus.server import Server\n'), ((4, 31, 4, 41), 'circuits.Debugger', 'Debugger', ({}, {}), '()', False, 'from circuits import Debugger\n')] |
BlackLight/platypush | platypush/backend/joystick/linux/__init__.py | 6c0a8bf2599eb4ab41a6122dbd988075d8b1a63a | import array
import struct
import time
from fcntl import ioctl
from typing import IO
from platypush.backend import Backend
from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, \
JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent
class JoystickLinuxBackend(Backend):
"""
This backend intercepts events from joystick devices through the native Linux API implementation.
It is loosely based on https://gist.github.com/rdb/8864666, which itself uses the
`Linux kernel joystick API <https://www.kernel.org/doc/Documentation/input/joystick-api.txt>`_ to interact with
the devices.
Triggers:
* :class:`platypush.message.event.joystick.JoystickConnectedEvent` when the joystick is connected.
* :class:`platypush.message.event.joystick.JoystickDisconnectedEvent` when the joystick is disconnected.
* :class:`platypush.message.event.joystick.JoystickButtonPressedEvent` when a joystick button is pressed.
* :class:`platypush.message.event.joystick.JoystickButtonReleasedEvent` when a joystick button is released.
* :class:`platypush.message.event.joystick.JoystickAxisEvent` when an axis value of the joystick changes.
"""
# These constants were borrowed from linux/input.h
axis_names = {
0x00: 'x',
0x01: 'y',
0x02: 'z',
0x03: 'rx',
0x04: 'ry',
0x05: 'rz',
0x06: 'throttle',
0x07: 'rudder',
0x08: 'wheel',
0x09: 'gas',
0x0a: 'brake',
0x10: 'hat0x',
0x11: 'hat0y',
0x12: 'hat1x',
0x13: 'hat1y',
0x14: 'hat2x',
0x15: 'hat2y',
0x16: 'hat3x',
0x17: 'hat3y',
0x18: 'pressure',
0x19: 'distance',
0x1a: 'tilt_x',
0x1b: 'tilt_y',
0x1c: 'tool_width',
0x20: 'volume',
0x28: 'misc',
}
button_names = {
0x120: 'trigger',
0x121: 'thumb',
0x122: 'thumb2',
0x123: 'top',
0x124: 'top2',
0x125: 'pinkie',
0x126: 'base',
0x127: 'base2',
0x128: 'base3',
0x129: 'base4',
0x12a: 'base5',
0x12b: 'base6',
0x12f: 'dead',
0x130: 'a',
0x131: 'b',
0x132: 'c',
0x133: 'x',
0x134: 'y',
0x135: 'z',
0x136: 'tl',
0x137: 'tr',
0x138: 'tl2',
0x139: 'tr2',
0x13a: 'select',
0x13b: 'start',
0x13c: 'mode',
0x13d: 'thumbl',
0x13e: 'thumbr',
0x220: 'dpad_up',
0x221: 'dpad_down',
0x222: 'dpad_left',
0x223: 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0: 'dpad_left',
0x2c1: 'dpad_right',
0x2c2: 'dpad_up',
0x2c3: 'dpad_down',
}
def __init__(self, device: str = '/dev/input/js0', *args, **kwargs):
"""
:param device: Joystick device to monitor (default: ``/dev/input/js0``).
"""
super().__init__(*args, **kwargs)
self.device = device
self._axis_states = {}
self._button_states = {}
self._axis_map = []
self._button_map = []
def _init_joystick(self, dev: IO):
# Get the device name.
buf = array.array('B', [0] * 64)
ioctl(dev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tobytes().rstrip(b'\x00').decode('utf-8')
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(dev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(dev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(dev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = self.axis_names.get(axis, 'unknown(0x%02x)' % axis)
self._axis_map.append(axis_name)
self._axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(dev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = self.button_names.get(btn, 'unknown(0x%03x)' % btn)
self._button_map.append(btn_name)
self._button_states[btn_name] = 0
self.bus.post(JoystickConnectedEvent(device=self.device, name=js_name, axes=self._axis_map,
buttons=self._button_map))
def run(self):
super().run()
self.logger.info(f'Opening {self.device}...')
while not self.should_stop():
# Open the joystick device.
try:
jsdev = open(self.device, 'rb')
self._init_joystick(jsdev)
except Exception as e:
self.logger.debug(f'Joystick device on {self.device} not available: {e}')
time.sleep(5)
continue
# Joystick event loop
while not self.should_stop():
try:
evbuf = jsdev.read(8)
if evbuf:
_, value, evt_type, number = struct.unpack('IhBB', evbuf)
if evt_type & 0x80: # Initial state notification
continue
if evt_type & 0x01:
button = self._button_map[number]
if button:
self._button_states[button] = value
evt_class = JoystickButtonPressedEvent if value else JoystickButtonReleasedEvent
# noinspection PyTypeChecker
self.bus.post(evt_class(device=self.device, button=button))
if evt_type & 0x02:
axis = self._axis_map[number]
if axis:
fvalue = value / 32767.0
self._axis_states[axis] = fvalue
# noinspection PyTypeChecker
self.bus.post(JoystickAxisEvent(device=self.device, axis=axis, value=fvalue))
except OSError as e:
self.logger.warning(f'Connection to {self.device} lost: {e}')
self.bus.post(JoystickDisconnectedEvent(device=self.device))
break
| [((113, 14, 113, 40), 'array.array', 'array.array', ({(113, 26, 113, 29): '"""B"""', (113, 31, 113, 39): '[0] * 64'}, {}), "('B', [0] * 64)", False, 'import array\n'), ((118, 14, 118, 35), 'array.array', 'array.array', ({(118, 26, 118, 29): '"""B"""', (118, 31, 118, 34): '[0]'}, {}), "('B', [0])", False, 'import array\n'), ((119, 8, 119, 35), 'fcntl.ioctl', 'ioctl', ({(119, 14, 119, 17): 'dev', (119, 19, 119, 29): '(2147576337)', (119, 31, 119, 34): 'buf'}, {}), '(dev, 2147576337, buf)', False, 'from fcntl import ioctl\n'), ((122, 14, 122, 35), 'array.array', 'array.array', ({(122, 26, 122, 29): '"""B"""', (122, 31, 122, 34): '[0]'}, {}), "('B', [0])", False, 'import array\n'), ((123, 8, 123, 35), 'fcntl.ioctl', 'ioctl', ({(123, 14, 123, 17): 'dev', (123, 19, 123, 29): '(2147576338)', (123, 31, 123, 34): 'buf'}, {}), '(dev, 2147576338, buf)', False, 'from fcntl import ioctl\n'), ((127, 14, 127, 42), 'array.array', 'array.array', ({(127, 26, 127, 29): '"""B"""', (127, 31, 127, 41): '[0] * 64'}, {}), "('B', [0] * 64)", False, 'import array\n'), ((128, 8, 128, 35), 'fcntl.ioctl', 'ioctl', ({(128, 14, 128, 17): 'dev', (128, 19, 128, 29): '(2151705138)', (128, 31, 128, 34): 'buf'}, {}), '(dev, 2151705138, buf)', False, 'from fcntl import ioctl\n'), ((136, 14, 136, 41), 'array.array', 'array.array', ({(136, 26, 136, 29): '"""H"""', (136, 31, 136, 40): '[0] * 200'}, {}), "('H', [0] * 200)", False, 'import array\n'), ((137, 8, 137, 35), 'fcntl.ioctl', 'ioctl', ({(137, 14, 137, 17): 'dev', (137, 19, 137, 29): '(2151705140)', (137, 31, 137, 34): 'buf'}, {}), '(dev, 2151705140, buf)', False, 'from fcntl import ioctl\n'), ((144, 22, 145, 70), 'platypush.message.event.joystick.JoystickConnectedEvent', 'JoystickConnectedEvent', (), '', False, 'from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent\n'), ((158, 16, 158, 29), 'time.sleep', 'time.sleep', ({(158, 27, 158, 28): '(5)'}, {}), '(5)', False, 'import time\n'), ((166, 53, 166, 81), 'struct.unpack', 'struct.unpack', ({(166, 67, 166, 73): '"""IhBB"""', (166, 75, 166, 80): 'evbuf'}, {}), "('IhBB', evbuf)", False, 'import struct\n'), ((188, 34, 188, 79), 'platypush.message.event.joystick.JoystickDisconnectedEvent', 'JoystickDisconnectedEvent', (), '', False, 'from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent\n'), ((185, 46, 185, 108), 'platypush.message.event.joystick.JoystickAxisEvent', 'JoystickAxisEvent', (), '', False, 'from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent\n')] |
SaxionMechatronics/Firmware | src/modules/sensors/vehicle_magnetometer/mag_compensation/python/mag_compensation.py | 7393d5d7610dc8d2cb64d90a5359b6c561fb642a | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: mag_compensation.py
Author: Tanja Baumann
Email: [email protected]
Github: https://github.com/baumanta
Description:
Computes linear coefficients for mag compensation from thrust and current
Usage:
python mag_compensation.py /path/to/log/logfile.ulg current --instance 1
Remark:
If your logfile does not contain some of the topics, e.g.battery_status/current_a
you will have to comment out the corresponding parts in the script
"""
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from pyulog import ULog
from pyulog.px4 import PX4ULog
from pylab import *
import numpy as np
import textwrap as tw
import argparse
import sys
#arguments
parser = argparse.ArgumentParser(description='Calculate compensation parameters from ulog')
parser.add_argument('logfile', type=str, nargs='?', default=[],
help='full path to ulog file')
parser.add_argument('type', type=str, nargs='?', choices=['current', 'thrust'], default=[],
help='Power signal used for compensation, supported is "current" or "thrust".')
parser.add_argument('--instance', type=int, nargs='?', default=0,
help='instance of the current or thrust signal to use (0 or 1)')
args = parser.parse_args()
log_name = args.logfile
comp_type = args.type
comp_instance = args.instance
#Load the log data (produced by pyulog)
log = ULog(log_name)
pxlog = PX4ULog(log)
def get_data(topic_name, variable_name, index):
try:
dataset = log.get_dataset(topic_name, index)
return dataset.data[variable_name]
except:
return []
def ms2s_list(time_ms_list):
if len(time_ms_list) > 0:
return 1e-6 * time_ms_list
else:
return time_ms_list
# Select msgs and copy into arrays
armed = get_data('vehicle_status', 'arming_state', 0)
t_armed = ms2s_list(get_data('vehicle_status', 'timestamp', 0))
if comp_type == "thrust":
power = get_data('vehicle_rates_setpoint', 'thrust_body[2]', comp_instance)
power_t = ms2s_list(get_data('vehicle_rates_setpoint', 'timestamp', comp_instance))
comp_type_param = 1
factor = 1
unit = "[G]"
elif comp_type == "current":
power = get_data('battery_status', 'current_a', comp_instance)
power = np.true_divide(power, 1000) #kA
power_t = ms2s_list(get_data('battery_status', 'timestamp', comp_instance))
comp_type_param = 2 + comp_instance
factor = -1
unit = "[G/kA]"
else:
print("unknown compensation type {}. Supported is either 'thrust' or 'current'.".format(comp_type))
sys.exit(1)
if len(power) == 0:
print("could not retrieve power signal from log, zero data points")
sys.exit(1)
mag0X_body = get_data('sensor_mag', 'x', 0)
mag0Y_body = get_data('sensor_mag', 'y', 0)
mag0Z_body = get_data('sensor_mag', 'z', 0)
t_mag0 = ms2s_list(get_data('sensor_mag', 'timestamp', 0))
mag0_ID = get_data('sensor_mag', 'device_id', 0)
mag1X_body = get_data('sensor_mag', 'x', 1)
mag1Y_body = get_data('sensor_mag', 'y', 1)
mag1Z_body = get_data('sensor_mag', 'z', 1)
t_mag1 = ms2s_list(get_data('sensor_mag', 'timestamp', 1))
mag1_ID = get_data('sensor_mag', 'device_id', 1)
mag2X_body = get_data('sensor_mag', 'x', 2)
mag2Y_body = get_data('sensor_mag', 'y', 2)
mag2Z_body = get_data('sensor_mag', 'z', 2)
t_mag2 = ms2s_list(get_data('sensor_mag', 'timestamp', 2))
mag2_ID = get_data('sensor_mag', 'device_id', 2)
mag3X_body = get_data('sensor_mag', 'x', 3)
mag3Y_body = get_data('sensor_mag', 'y', 3)
mag3Z_body = get_data('sensor_mag', 'z', 3)
t_mag3 = ms2s_list(get_data('sensor_mag', 'timestamp', 3))
mag3_ID = get_data('sensor_mag', 'device_id', 3)
magX_body = []
magY_body = []
magZ_body = []
mag_id = []
t_mag = []
if len(mag0X_body) > 0:
magX_body.append(mag0X_body)
magY_body.append(mag0Y_body)
magZ_body.append(mag0Z_body)
t_mag.append(t_mag0)
mag_id.append(mag0_ID[0])
if len(mag1X_body) > 0:
magX_body.append(mag1X_body)
magY_body.append(mag1Y_body)
magZ_body.append(mag1Z_body)
t_mag.append(t_mag1)
mag_id.append(mag1_ID[0])
if len(mag2X_body) > 0:
magX_body.append(mag2X_body)
magY_body.append(mag2Y_body)
magZ_body.append(mag2Z_body)
t_mag.append(t_mag2)
mag_id.append(mag2_ID[0])
if len(mag3X_body) > 0:
magX_body.append(mag3X_body)
magY_body.append(mag3Y_body)
magZ_body.append(mag3Z_body)
t_mag.append(t_mag3)
mag_id.append(mag3_ID[0])
n_mag = len(magX_body)
#log index does not necessarily match mag calibration instance number
calibration_instance = []
instance_found = False
for idx in range(n_mag):
instance_found = False
for j in range(4):
if mag_id[idx] == log.initial_parameters["CAL_MAG{}_ID".format(j)]:
calibration_instance.append(j)
instance_found = True
if not instance_found:
print('Mag {} calibration instance not found, run compass calibration first.'.format(mag_id[idx]))
#get first arming sequence from data
start_time = 0
stop_time = 0
for i in range(len(armed)-1):
if armed[i] == 1 and armed[i+1] == 2:
start_time = t_armed[i+1]
if armed[i] == 2 and armed[i+1] == 1:
stop_time = t_armed[i+1]
break
#cut unarmed sequences from mag data
index_start = 0
index_stop = 0
for idx in range(n_mag):
for i in range(len(t_mag[idx])):
if t_mag[idx][i] > start_time:
index_start = i
break
for i in range(len(t_mag[idx])):
if t_mag[idx][i] > stop_time:
index_stop = i -1
break
t_mag[idx] = t_mag[idx][index_start:index_stop]
magX_body[idx] = magX_body[idx][index_start:index_stop]
magY_body[idx] = magY_body[idx][index_start:index_stop]
magZ_body[idx] = magZ_body[idx][index_start:index_stop]
#resample data
power_resampled = []
for idx in range(n_mag):
power_resampled.append(interp(t_mag[idx], power_t, power))
#fit linear to get coefficients
px = []
py = []
pz = []
for idx in range(n_mag):
px_temp, res_x, _, _, _ = polyfit(power_resampled[idx], magX_body[idx], 1,full = True)
py_temp, res_y, _, _, _ = polyfit(power_resampled[idx], magY_body[idx], 1,full = True)
pz_temp, res_z, _, _, _ = polyfit(power_resampled[idx], magZ_body[idx], 1, full = True)
px.append(px_temp)
py.append(py_temp)
pz.append(pz_temp)
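# Added note (hedged): each polyfit above fits the linear model mag_axis ~ coef * power + offset
# per magnetometer axis; the comparison plots below subtract coef * power from the raw signal,
# and the CAL_MAG*_?COMP parameters printed below are factor * coef.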
#print to console
for idx in range(n_mag):
print('Mag{} device ID {} (calibration instance {})'.format(idx, mag_id[idx], calibration_instance[idx]))
print('\033[91m \n{}-based compensation: \033[0m'.format(comp_type))
print('\nparam set CAL_MAG_COMP_TYP {}'.format(comp_type_param))
for idx in range(n_mag):
print('\nparam set CAL_MAG{}_XCOMP {:.3f}'.format(calibration_instance[idx], factor * px[idx][0]))
print('param set CAL_MAG{}_YCOMP {:.3f}'.format(calibration_instance[idx], factor * py[idx][0]))
print('param set CAL_MAG{}_ZCOMP {:.3f}'.format(calibration_instance[idx], factor * pz[idx][0]))
#plot data
for idx in range(n_mag):
fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
fig.suptitle('Compensation Parameter Fit \n{} \nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
plt.subplot(1,3,1)
plt.plot(power_resampled[idx], magX_body[idx], 'yo', power_resampled[idx], px[idx][0]*power_resampled[idx]+px[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag X [G]')
plt.subplot(1,3,2)
plt.plot(power_resampled[idx], magY_body[idx], 'yo', power_resampled[idx], py[idx][0]*power_resampled[idx]+py[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag Y [G]')
plt.subplot(1,3,3)
plt.plot(power_resampled[idx], magZ_body[idx], 'yo', power_resampled[idx], pz[idx][0]*power_resampled[idx]+pz[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag Z [G]')
# display results
plt.figtext(0.24, 0.03, 'CAL_MAG{}_XCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * px[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
plt.figtext(0.51, 0.03, 'CAL_MAG{}_YCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * py[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
plt.figtext(0.79, 0.03, 'CAL_MAG{}_ZCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * pz[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
#compensation comparison plots
for idx in range(n_mag):
fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
fig.suptitle('Original Data vs. Compensation \n{}\nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
plt.subplot(3,1,1)
original_x, = plt.plot(t_mag[idx], magX_body[idx], label='original')
power_x, = plt.plot(t_mag[idx],magX_body[idx] - px[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_x, power_x])
plt.xlabel('Time [s]')
plt.ylabel('Mag X corrected[G]')
plt.subplot(3,1,2)
original_y, = plt.plot(t_mag[idx], magY_body[idx], label='original')
power_y, = plt.plot(t_mag[idx],magY_body[idx] - py[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_y, power_y])
plt.xlabel('Time [s]')
plt.ylabel('Mag Y corrected[G]')
plt.subplot(3,1,3)
original_z, = plt.plot(t_mag[idx], magZ_body[idx], label='original')
power_z, = plt.plot(t_mag[idx],magZ_body[idx] - pz[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_z, power_z])
plt.xlabel('Time [s]')
plt.ylabel('Mag Z corrected[G]')
plt.show()
| [((30, 9, 30, 91), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((45, 6, 45, 20), 'pyulog.ULog', 'ULog', ({(45, 11, 45, 19): 'log_name'}, {}), '(log_name)', False, 'from pyulog import ULog\n'), ((46, 8, 46, 20), 'pyulog.px4.PX4ULog', 'PX4ULog', ({(46, 16, 46, 19): 'log'}, {}), '(log)', False, 'from pyulog.px4 import PX4ULog\n'), ((272, 0, 272, 10), 'matplotlib.pylab.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pylab as plt\n'), ((222, 10, 222, 86), 'matplotlib.pylab.figure', 'plt.figure', (), '', True, 'import matplotlib.pylab as plt\n'), ((225, 4, 225, 22), 'matplotlib.pylab.subplot', 'plt.subplot', ({(225, 16, 225, 17): '(1)', (225, 18, 225, 19): '(3)', (225, 20, 225, 21): '(1)'}, {}), '(1, 3, 1)', True, 'import matplotlib.pylab as plt\n'), ((226, 4, 226, 129), 'matplotlib.pylab.plot', 'plt.plot', ({(226, 13, 226, 33): 'power_resampled[idx]', (226, 35, 226, 49): 'magX_body[idx]', (226, 51, 226, 55): '"""yo"""', (226, 57, 226, 77): 'power_resampled[idx]', (226, 79, 226, 121): '(px[idx][0] * power_resampled[idx] + px[idx][1])', (226, 123, 226, 128): '"""--k"""'}, {}), "(power_resampled[idx], magX_body[idx], 'yo', power_resampled[idx], \n px[idx][0] * power_resampled[idx] + px[idx][1], '--k')", True, 'import matplotlib.pylab as plt\n'), ((227, 4, 227, 30), 'matplotlib.pylab.xlabel', 'plt.xlabel', ({(227, 15, 227, 29): '"""current [kA]"""'}, {}), "('current [kA]')", True, 'import matplotlib.pylab as plt\n'), ((228, 4, 228, 27), 'matplotlib.pylab.ylabel', 'plt.ylabel', ({(228, 15, 228, 26): '"""mag X [G]"""'}, {}), "('mag X [G]')", True, 'import matplotlib.pylab as plt\n'), ((230, 4, 230, 22), 'matplotlib.pylab.subplot', 'plt.subplot', ({(230, 16, 230, 17): '(1)', (230, 18, 230, 19): '(3)', (230, 20, 230, 21): '(2)'}, {}), '(1, 3, 2)', True, 'import matplotlib.pylab as plt\n'), ((231, 4, 231, 129), 'matplotlib.pylab.plot', 'plt.plot', ({(231, 13, 231, 33): 'power_resampled[idx]', (231, 35, 231, 49): 'magY_body[idx]', (231, 51, 231, 55): '"""yo"""', (231, 57, 231, 77): 'power_resampled[idx]', (231, 79, 231, 121): '(py[idx][0] * power_resampled[idx] + py[idx][1])', (231, 123, 231, 128): '"""--k"""'}, {}), "(power_resampled[idx], magY_body[idx], 'yo', power_resampled[idx], \n py[idx][0] * power_resampled[idx] + py[idx][1], '--k')", True, 'import matplotlib.pylab as plt\n'), ((232, 4, 232, 30), 'matplotlib.pylab.xlabel', 'plt.xlabel', ({(232, 15, 232, 29): '"""current [kA]"""'}, {}), "('current [kA]')", True, 'import matplotlib.pylab as plt\n'), ((233, 4, 233, 27), 'matplotlib.pylab.ylabel', 'plt.ylabel', ({(233, 15, 233, 26): '"""mag Y [G]"""'}, {}), "('mag Y [G]')", True, 'import matplotlib.pylab as plt\n'), ((235, 4, 235, 22), 'matplotlib.pylab.subplot', 'plt.subplot', ({(235, 16, 235, 17): '(1)', (235, 18, 235, 19): '(3)', (235, 20, 235, 21): '(3)'}, {}), '(1, 3, 3)', True, 'import matplotlib.pylab as plt\n'), ((236, 4, 236, 129), 'matplotlib.pylab.plot', 'plt.plot', ({(236, 13, 236, 33): 'power_resampled[idx]', (236, 35, 236, 49): 'magZ_body[idx]', (236, 51, 236, 55): '"""yo"""', (236, 57, 236, 77): 'power_resampled[idx]', (236, 79, 236, 121): '(pz[idx][0] * power_resampled[idx] + pz[idx][1])', (236, 123, 236, 128): '"""--k"""'}, {}), "(power_resampled[idx], magZ_body[idx], 'yo', power_resampled[idx], \n pz[idx][0] * power_resampled[idx] + pz[idx][1], '--k')", True, 'import matplotlib.pylab as plt\n'), ((237, 4, 237, 30), 'matplotlib.pylab.xlabel', 'plt.xlabel', ({(237, 15, 237, 29): '"""current [kA]"""'}, {}), "('current 
[kA]')", True, 'import matplotlib.pylab as plt\n'), ((238, 4, 238, 27), 'matplotlib.pylab.ylabel', 'plt.ylabel', ({(238, 15, 238, 26): '"""mag Z [G]"""'}, {}), "('mag Z [G]')", True, 'import matplotlib.pylab as plt\n'), ((248, 10, 248, 86), 'matplotlib.pylab.figure', 'plt.figure', (), '', True, 'import matplotlib.pylab as plt\n'), ((251, 4, 251, 22), 'matplotlib.pylab.subplot', 'plt.subplot', ({(251, 16, 251, 17): '(3)', (251, 18, 251, 19): '(1)', (251, 20, 251, 21): '(1)'}, {}), '(3, 1, 1)', True, 'import matplotlib.pylab as plt\n'), ((252, 18, 252, 72), 'matplotlib.pylab.plot', 'plt.plot', (), '', True, 'import matplotlib.pylab as plt\n'), ((253, 15, 253, 107), 'matplotlib.pylab.plot', 'plt.plot', (), '', True, 'import matplotlib.pylab as plt\n'), ((254, 4, 254, 45), 'matplotlib.pylab.legend', 'plt.legend', (), '', True, 'import matplotlib.pylab as plt\n'), ((255, 4, 255, 26), 'matplotlib.pylab.xlabel', 'plt.xlabel', ({(255, 15, 255, 25): '"""Time [s]"""'}, {}), "('Time [s]')", True, 'import matplotlib.pylab as plt\n'), ((256, 4, 256, 36), 'matplotlib.pylab.ylabel', 'plt.ylabel', ({(256, 15, 256, 35): '"""Mag X corrected[G]"""'}, {}), "('Mag X corrected[G]')", True, 'import matplotlib.pylab as plt\n'), ((258, 4, 258, 22), 'matplotlib.pylab.subplot', 'plt.subplot', ({(258, 16, 258, 17): '(3)', (258, 18, 258, 19): '(1)', (258, 20, 258, 21): '(2)'}, {}), '(3, 1, 2)', True, 'import matplotlib.pylab as plt\n'), ((259, 18, 259, 72), 'matplotlib.pylab.plot', 'plt.plot', (), '', True, 'import matplotlib.pylab as plt\n'), ((260, 15, 260, 107), 'matplotlib.pylab.plot', 'plt.plot', (), '', True, 'import matplotlib.pylab as plt\n'), ((261, 4, 261, 45), 'matplotlib.pylab.legend', 'plt.legend', (), '', True, 'import matplotlib.pylab as plt\n'), ((262, 4, 262, 26), 'matplotlib.pylab.xlabel', 'plt.xlabel', ({(262, 15, 262, 25): '"""Time [s]"""'}, {}), "('Time [s]')", True, 'import matplotlib.pylab as plt\n'), ((263, 4, 263, 36), 'matplotlib.pylab.ylabel', 'plt.ylabel', ({(263, 15, 263, 35): '"""Mag Y corrected[G]"""'}, {}), "('Mag Y corrected[G]')", True, 'import matplotlib.pylab as plt\n'), ((265, 4, 265, 22), 'matplotlib.pylab.subplot', 'plt.subplot', ({(265, 16, 265, 17): '(3)', (265, 18, 265, 19): '(1)', (265, 20, 265, 21): '(3)'}, {}), '(3, 1, 3)', True, 'import matplotlib.pylab as plt\n'), ((266, 18, 266, 72), 'matplotlib.pylab.plot', 'plt.plot', (), '', True, 'import matplotlib.pylab as plt\n'), ((267, 15, 267, 107), 'matplotlib.pylab.plot', 'plt.plot', (), '', True, 'import matplotlib.pylab as plt\n'), ((268, 4, 268, 45), 'matplotlib.pylab.legend', 'plt.legend', (), '', True, 'import matplotlib.pylab as plt\n'), ((269, 4, 269, 26), 'matplotlib.pylab.xlabel', 'plt.xlabel', ({(269, 15, 269, 25): '"""Time [s]"""'}, {}), "('Time [s]')", True, 'import matplotlib.pylab as plt\n'), ((270, 4, 270, 36), 'matplotlib.pylab.ylabel', 'plt.ylabel', ({(270, 15, 270, 35): '"""Mag Z corrected[G]"""'}, {}), "('Mag Z corrected[G]')", True, 'import matplotlib.pylab as plt\n'), ((73, 9, 73, 36), 'numpy.true_divide', 'np.true_divide', ({(73, 24, 73, 29): 'power', (73, 31, 73, 35): '1000'}, {}), '(power, 1000)', True, 'import numpy as np\n')] |
19857625778/watchlist | app.py | 284e3f814394d0fda6e262ab84177a493027c19e | from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
return 'welcome to my watchlist' | [((2, 6, 2, 19), 'flask.Flask', 'Flask', ({(2, 12, 2, 18): '_name_'}, {}), '(_name_)', False, 'from flask import Flask\n')] |
AI-Traiding-Team/paired_trading | portfolio_optimization/constants.py | 72d4dd0071314e2f0efaa26931ca7339199fc998 | import os
path1 = "outputs"
path2 = "outputs/_imgs"
path3 = "outputs/max_sharpe_weights"
path4 = "outputs/opt_portfolio_trades"
try:
os.mkdir(path1)
except OSError:
print ("Директория %s уже создана" % path1)
else:
print ("Успешно создана директория %s " % path1)
try:
os.makedirs(path2)
os.makedirs(path3)
os.makedirs(path4)
except OSError:
print ("Директории уже созданы")
else:
print ("Успешно созданы нужные директории")
source_path = '../source_root/1m'
destination_path = 'outputs' | [((9, 4, 9, 19), 'os.mkdir', 'os.mkdir', ({(9, 13, 9, 18): 'path1'}, {}), '(path1)', False, 'import os\n'), ((16, 4, 16, 22), 'os.makedirs', 'os.makedirs', ({(16, 16, 16, 21): 'path2'}, {}), '(path2)', False, 'import os\n'), ((17, 4, 17, 22), 'os.makedirs', 'os.makedirs', ({(17, 16, 17, 21): 'path3'}, {}), '(path3)', False, 'import os\n'), ((18, 4, 18, 22), 'os.makedirs', 'os.makedirs', ({(18, 16, 18, 21): 'path4'}, {}), '(path4)', False, 'import os\n')] |
silky/mypy | mypy/transformtype.py | de6a8d3710df9f49109cb682f2092e4967bfb92c | """Transform classes for runtime type checking."""
from typing import Undefined, List, Set, Any, cast, Tuple, Dict
from mypy.nodes import (
TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt,
TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt,
AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode
)
from mypy import nodes
from mypy.semanal import self_type
from mypy.types import (
Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar,
UnboundType
)
from mypy.checkmember import analyse_member_access
from mypy.checkexpr import type_object_type
from mypy.subtypes import map_instance_to_supertype
import mypy.transform
from mypy.transformfunc import FuncTransformer
from mypy.transutil import (
self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type
)
from mypy.rttypevars import translate_runtime_type_vars_locally
from mypy.compileslotmap import find_slot_origin
from mypy.coerce import coerce
from mypy.maptypevar import num_slots, get_tvar_access_path
from mypy import erasetype
class TypeTransformer:
"""Class for transforming type definitions for runtime type checking.
Transform a type definition by modifying it in-place.
The following transformations are performed:
* Represent generic type variables explicitly as attributes.
* Create generic wrapper classes used by coercions to different type
args.
* Create wrapper methods needed when overriding methods with different
signatures.
* Create wrapper methods for calling methods in dynamically typed code.
These perform the necessary coercions for arguments and return values
to/from 'Any'.
This is used by DyncheckTransformVisitor and is logically aggregated within
that class.
"""
# Used for common transformation operations.
tf = Undefined('mypy.transform.DyncheckTransformVisitor')
# Used for transforming methods.
func_tf = Undefined(FuncTransformer)
def __init__(self, tf: 'mypy.transform.DyncheckTransformVisitor') -> None:
self.tf = tf
self.func_tf = FuncTransformer(tf)
def transform_type_def(self, tdef: TypeDef) -> List[Node]:
"""Transform a type definition.
The result may be one or two definitions. The first is the
transformation of the original TypeDef. The second is a
wrapper type, which is generated for generic types only.
"""
defs = [] # type: List[Node]
if tdef.info.type_vars:
# This is a generic type. Insert type variable slots in
# the class definition for new type variables, i.e. type
# variables not mapped to superclass type variables.
defs.extend(self.make_tvar_representation(tdef.info))
# Iterate over definitions and transform each of them.
vars = set() # type: Set[Var]
for d in tdef.defs.body:
if isinstance(d, FuncDef):
# Implicit cast from FuncDef[] to Node[] is safe below.
defs.extend(Any(self.func_tf.transform_method(d)))
elif isinstance(d, VarDef):
defs.extend(self.transform_var_def(d))
for n in d.items:
vars.add(n)
elif isinstance(d, AssignmentStmt):
self.transform_assignment(d)
defs.append(d)
# Add accessors for implicitly defined attributes.
for node in tdef.info.names.values():
if isinstance(node.node, Var):
v = cast(Var, node.node)
if v.info == tdef.info and v not in vars:
defs.extend(self.make_accessors(v))
# For generic classes, add an implicit __init__ wrapper.
defs.extend(self.make_init_wrapper(tdef))
if tdef.is_generic() or (tdef.info.bases and
tdef.info.mro[1].is_generic()):
self.make_instance_tvar_initializer(
cast(FuncDef, tdef.info.get_method('__init__')))
if not defs:
defs.append(PassStmt())
if tdef.is_generic():
gen_wrapper = self.generic_class_wrapper(tdef)
tdef.defs = Block(defs)
dyn_wrapper = self.make_type_object_wrapper(tdef)
if not tdef.is_generic():
return [tdef, dyn_wrapper]
else:
return [tdef, dyn_wrapper, gen_wrapper]
def make_init_wrapper(self, tdef: TypeDef) -> List[Node]:
"""Make and return an implicit __init__ if class needs it.
Otherwise, return an empty list. We include an implicit
__init__ if the class is generic or if it extends a generic class
and if it does not define __init__.
The __init__ of a generic class requires one or more extra type
variable arguments. The inherited __init__ may not accept these.
For example, assume these definitions:
. class A(Generic[T]): pass
. class B(A[int]): pass
The constructor for B will be (equivalent to)
. def __init__(self: B) -> None:
. self.__tv = <int>
. super().__init__(<int>)
"""
# FIX overloading, default args / varargs, keyword args
info = tdef.info
if '__init__' not in info.names and (
tdef.is_generic() or (info.bases and
info.mro[1].is_generic())):
# Generic class with no explicit __init__ method
# (i.e. __init__ inherited from superclass). Generate a
# wrapper that initializes type variable slots and calls
# the superclass __init__ method.
base = info.mro[1]
selftype = self_type(info)
callee_type = cast(Callable, analyse_member_access(
'__init__', selftype, None, False, True, None, None,
base))
# Now the callee type may contain the type variables of a
# grandparent as bound type variables, but we want the
# type variables of the parent class. Explicitly set the
# bound type variables.
callee_type = self.fix_bound_init_tvars(callee_type,
map_instance_to_supertype(selftype, base))
super_init = cast(FuncDef, base.get_method('__init__'))
# Build argument list.
args = [Var('self')]
for i in range(1, len(super_init.args)):
args.append(Var(super_init.args[i].name()))
args[-1].type = callee_type.arg_types[i - 1]
selft = self_type(self.tf.type_context())
callee_type = prepend_arg_type(callee_type, selft)
creat = FuncDef('__init__', args,
super_init.arg_kinds, [None] * len(args),
Block([]))
creat.info = tdef.info
creat.type = callee_type
creat.is_implicit = False
tdef.info.names['__init__'] = SymbolTableNode(MDEF, creat,
typ=creat.type)
# Insert a call to superclass constructor. If the
# superclass is object, the constructor does nothing =>
# omit the call.
if base.fullname() != 'builtins.object':
creat.body.body.append(
self.make_superclass_constructor_call(tdef.info,
callee_type))
# Implicit cast from FuncDef[] to Node[] is safe below.
return Any(self.func_tf.transform_method(creat))
else:
return []
def fix_bound_init_tvars(self, callable: Callable,
typ: Instance) -> Callable:
"""Replace bound type vars of callable with args from instance type."""
a = [] # type: List[Tuple[int, Type]]
for i in range(len(typ.args)):
a.append((i + 1, typ.args[i]))
return Callable(callable.arg_types, callable.arg_kinds,
callable.arg_names, callable.ret_type,
callable.is_type_obj(), callable.name,
callable.variables, a)
def make_superclass_constructor_call(
self, info: TypeInfo, callee_type: Callable) -> ExpressionStmt:
"""Construct a statement that calls the superclass constructor.
        In particular, it passes any type variable arguments as needed.
"""
callee = SuperExpr('__init__')
callee.info = info
# We do not handle generic constructors. Either pass runtime
# type variables from the current scope or perhaps require
# explicit constructor in this case.
selftype = self_type(info)
# FIX overloading
# FIX default args / varargs
# Map self type to the superclass context.
base = info.mro[1]
selftype = map_instance_to_supertype(selftype, base)
super_init = cast(FuncDef, base.get_method('__init__'))
# Add constructor arguments.
args = [] # type: List[Node]
for n in range(1, callee_type.min_args):
args.append(NameExpr(super_init.args[n].name()))
self.tf.set_type(args[-1], callee_type.arg_types[n])
# Store callee type after stripping away the 'self' type.
self.tf.set_type(callee, nodes.method_callable(callee_type))
call = CallExpr(callee, args, [nodes.ARG_POS] * len(args))
return ExpressionStmt(call)
def transform_var_def(self, o: VarDef) -> List[Node]:
"""Transform a member variable definition.
The result may be one or more definitions.
"""
res = [o] # type: List[Node]
self.tf.visit_var_def(o)
# Add $x and set$x accessor wrappers for data attributes. These let
# derived classes redefine a data attribute as a property.
for n in o.items:
res.extend(self.make_accessors(n))
return res
def transform_assignment(self, o: AssignmentStmt) -> None:
"""Transform an assignment statement in class body."""
self.tf.visit_assignment_stmt(o)
def make_accessors(self, n: Var) -> List[Node]:
if n.type:
t = n.type
else:
t = AnyType()
return [self.make_getter_wrapper(n.name(), t),
self.make_setter_wrapper(n.name(), t),
self.make_dynamic_getter_wrapper(n.name(), t),
self.make_dynamic_setter_wrapper(n.name(), t)]
def make_getter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a getter wrapper for a data attribute.
The getter will be of this form:
. def $name*(self: C) -> type:
. return self.name!
"""
scope = self.make_scope()
selft = self.self_type()
selfv = scope.add('self', selft)
member_expr = MemberExpr(scope.name_expr('self'), name, direct=True)
ret = ReturnStmt(member_expr)
wrapper_name = '$' + name
sig = Callable([selft], [nodes.ARG_POS], [None], typ, False)
fdef = FuncDef(wrapper_name,
[selfv],
[nodes.ARG_POS],
[None],
Block([ret]), sig)
fdef.info = self.tf.type_context()
return fdef
def make_dynamic_getter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a dynamically-typed getter wrapper for a data attribute.
The getter will be of this form:
. def $name*(self: C) -> Any:
. return {Any <= typ self.name!}
"""
scope = self.make_scope()
selft = self.self_type()
selfv = scope.add('self', selft)
member_expr = MemberExpr(scope.name_expr('self'), name, direct=True)
coerce_expr = coerce(member_expr, AnyType(), typ,
self.tf.type_context())
ret = ReturnStmt(coerce_expr)
wrapper_name = '$' + name + self.tf.dynamic_suffix()
sig = Callable([selft], [nodes.ARG_POS], [None], AnyType(), False)
return FuncDef(wrapper_name,
[selfv],
[nodes.ARG_POS],
[None],
Block([ret]), sig)
def make_setter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a setter wrapper for a data attribute.
The setter will be of this form:
. def set$name(self: C, name: typ) -> None:
. self.name! = name
"""
scope = self.make_scope()
selft = self.self_type()
selfv = scope.add('self', selft)
namev = scope.add(name, typ)
lvalue = MemberExpr(scope.name_expr('self'), name, direct=True)
rvalue = scope.name_expr(name)
ret = AssignmentStmt([lvalue], rvalue)
wrapper_name = 'set$' + name
sig = Callable([selft, typ],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Void(), False)
fdef = FuncDef(wrapper_name,
[selfv, namev],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Block([ret]), sig)
fdef.info = self.tf.type_context()
return fdef
def make_dynamic_setter_wrapper(self, name: str, typ: Type) -> FuncDef:
"""Create a dynamically-typed setter wrapper for a data attribute.
The setter will be of this form:
. def set$name*(self: C, name; Any) -> None:
. self.name! = {typ name}
"""
lvalue = MemberExpr(self_expr(), name, direct=True)
name_expr = NameExpr(name)
rvalue = coerce(name_expr, typ, AnyType(), self.tf.type_context())
ret = AssignmentStmt([lvalue], rvalue)
wrapper_name = 'set$' + name + self.tf.dynamic_suffix()
selft = self_type(self.tf.type_context())
sig = Callable([selft, AnyType()],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Void(), False)
return FuncDef(wrapper_name,
[Var('self'), Var(name)],
[nodes.ARG_POS, nodes.ARG_POS],
[None, None],
Block([ret]), sig)
def generic_accessor_wrappers(self, s: AssignmentStmt) -> List[Node]:
"""Construct wrapper class methods for attribute accessors."""
res = [] # type: List[Node]
assert len(s.lvalues) == 1
assert isinstance(s.lvalues[0], NameExpr)
assert s.type is not None
name = cast(NameExpr, s.lvalues[0])
for fd in [self.make_getter_wrapper(name.name, s.type),
self.make_setter_wrapper(name.name, s.type)]:
res.extend(self.func_tf.generic_method_wrappers(fd))
return res
def generic_class_wrapper(self, tdef: TypeDef) -> TypeDef:
"""Construct a wrapper class for a generic type."""
# FIX semanal meta-info for nodes + TypeInfo
defs = [] # type: List[Node]
# Does the type have a superclass, other than builtins.object?
base = tdef.info.mro[1]
has_proper_superclass = base.fullname() != 'builtins.object'
if not has_proper_superclass or self.tf.is_java:
# Generate member variables for wrapper object.
defs.extend(self.make_generic_wrapper_member_vars(tdef))
for alt in [False, BOUND_VAR]:
defs.extend(self.make_tvar_representation(tdef.info, alt))
# Generate constructor.
defs.append(self.make_generic_wrapper_init(tdef.info))
# Generate method wrappers.
for d in tdef.defs.body:
if isinstance(d, FuncDef):
if not d.is_constructor():
defs.extend(self.func_tf.generic_method_wrappers(d))
elif isinstance(d, AssignmentStmt):
defs.extend(self.generic_accessor_wrappers(d))
elif not isinstance(d, PassStmt):
raise RuntimeError(
'Definition {} at line {} not supported'.format(
type(d), d.line))
base_type = self.tf.named_type('builtins.object') # type: Type
# Inherit superclass wrapper if there is one.
if has_proper_superclass:
base = self.find_generic_base_class(tdef.info)
if base:
# TODO bind the type somewhere
base_type = UnboundType(base.defn.name +
self.tf.wrapper_class_suffix())
# Build the type definition.
wrapper = TypeDef(tdef.name + self.tf.wrapper_class_suffix(),
Block(defs),
None,
[base_type])
# FIX fullname
self.tf.add_line_mapping(tdef, wrapper)
return wrapper
def find_generic_base_class(self, info: TypeInfo) -> TypeInfo:
base = info.mro[1]
while True:
if base.type_vars != []:
return base
if len(base.mro) <= 1:
return None
base = base.mro[1]
def make_generic_wrapper_member_vars(self, tdef: TypeDef) -> List[Node]:
"""Generate member variable definition for wrapped object (__o).
This is added to a generic wrapper class.
"""
# The type is 'Any' since it should behave covariantly in subclasses.
return [VarDef([Var(self.object_member_name(tdef.info),
AnyType())], False, None)]
def object_member_name(self, info: TypeInfo) -> str:
if self.tf.is_java:
return '__o_{}'.format(info.name)
else:
return '__o'
def make_generic_wrapper_init(self, info: TypeInfo) -> FuncDef:
"""Build constructor of a generic wrapper class."""
nslots = num_slots(info)
cdefs = [] # type: List[Node]
# Build superclass constructor call.
base = info.mro[1]
if base.fullname() != 'builtins.object' and self.tf.is_java:
s = SuperExpr('__init__')
cargs = [NameExpr('__o')] # type: List[Node]
for n in range(num_slots(base)):
cargs.append(NameExpr(tvar_arg_name(n + 1)))
for n in range(num_slots(base)):
cargs.append(NameExpr(tvar_arg_name(n + 1, BOUND_VAR)))
c = CallExpr(s, cargs, [nodes.ARG_POS] * len(cargs))
cdefs.append(ExpressionStmt(c))
# Create initialization of the wrapped object.
cdefs.append(AssignmentStmt([MemberExpr(
self_expr(),
self.object_member_name(info),
direct=True)],
NameExpr('__o')))
# Build constructor arguments.
args = [Var('self'), Var('__o')]
init = [None, None] # type: List[Node]
for alt in [False, BOUND_VAR]:
for n in range(nslots):
args.append(Var(tvar_arg_name(n + 1, alt)))
init.append(None)
nargs = nslots * 2 + 2
fdef = FuncDef('__init__',
args,
[nodes.ARG_POS] * nargs,
init,
Block(cdefs),
Callable( [AnyType()] * nargs,
[nodes.ARG_POS] * nargs, [None] * nargs,
Void(),
is_type_obj=False))
fdef.info = info
self.make_wrapper_slot_initializer(fdef)
return fdef
def make_tvar_representation(self, info: TypeInfo,
is_alt: Any = False) -> List[Node]:
"""Return type variable slot member definitions.
They are of the form '__tv*: Any'. Only include new slots defined in the
type.
"""
defs = [] # type: List[Node]
base_slots = num_slots(info.mro[1])
for n in range(len(info.type_vars)):
# Only include a type variable if it introduces a new slot.
slot = get_tvar_access_path(info, n + 1)[0] - 1
if slot >= base_slots:
defs.append(VarDef([Var(tvar_slot_name(slot, is_alt),
AnyType())], False, None))
return defs
def make_instance_tvar_initializer(self, creat: FuncDef) -> None:
"""Add type variable member initialization code to a constructor.
Modify the constructor body directly.
"""
for n in range(num_slots(creat.info)):
rvalue = self.make_tvar_init_expression(creat.info, n)
init = AssignmentStmt([MemberExpr(self_expr(),
tvar_slot_name(n),
direct=True)],
rvalue)
self.tf.set_type(init.lvalues[0], AnyType())
self.tf.set_type(init.rvalue, AnyType())
creat.body.body.insert(n, init)
def make_wrapper_slot_initializer(self, creat: FuncDef) -> None:
"""Add type variable member initializations to a wrapper constructor.
The function must be a constructor of a generic wrapper class. Modify
the constructor body directly.
"""
for alt in [BOUND_VAR, False]:
for n in range(num_slots(creat.info)):
rvalue = TypeExpr(
RuntimeTypeVar(NameExpr(tvar_slot_name(n, alt))))
init = AssignmentStmt(
[MemberExpr(self_expr(),
tvar_slot_name(n, alt), direct=True)],
rvalue)
self.tf.set_type(init.lvalues[0], AnyType())
self.tf.set_type(init.rvalue, AnyType())
creat.body.body.insert(n, init)
def make_tvar_init_expression(self, info: TypeInfo, slot: int) -> TypeExpr:
"""Return the initializer for the given slot in the given type.
This is the type expression that initializes the given slot
using the type arguments given to the constructor.
Examples:
- In 'class C(Generic[T]) ...', the initializer for the slot 0 is
TypeExpr(RuntimeTypeVar(NameExpr('__tv'))).
- In 'class D(C[int]) ...', the initializer for the slot 0 is
TypeExpr(<int instance>).
"""
# Figure out the superclass which defines the slot; also figure out
# the tvar index that maps to the slot.
origin, tv = find_slot_origin(info, slot)
# Map self type to the superclass -> extract tvar with target index
# (only contains subclass tvars?? PROBABLY NOT).
selftype = self_type(info)
selftype = map_instance_to_supertype(selftype, origin)
tvar = selftype.args[tv - 1]
# Map tvar to an expression; refer to local vars instead of member
# vars always.
tvar = translate_runtime_type_vars_locally(tvar)
# Build the rvalue (initializer) expression
return TypeExpr(tvar)
def make_type_object_wrapper(self, tdef: TypeDef) -> FuncDef:
"""Construct dynamically typed wrapper function for a class.
It simply calls the type object and returns the result.
"""
# TODO keyword args, default args and varargs
# TODO overloads
type_sig = cast(Callable, type_object_type(tdef.info, None))
type_sig = cast(Callable, erasetype.erase_typevars(type_sig))
init = cast(FuncDef, tdef.info.get_method('__init__'))
arg_kinds = type_sig.arg_kinds
# The wrapper function has a dynamically typed signature.
wrapper_sig = Callable( [AnyType()] * len(arg_kinds),
arg_kinds, [None] * len(arg_kinds),
AnyType(), False)
n = NameExpr(tdef.name) # TODO full name
args = self.func_tf.call_args(
init.args[1:],
type_sig,
wrapper_sig,
True, False)
call = CallExpr(n, args, arg_kinds)
ret = ReturnStmt(call)
fdef = FuncDef(tdef.name + self.tf.dynamic_suffix(),
init.args[1:],
arg_kinds, [None] * len(arg_kinds),
Block([ret]))
fdef.type = wrapper_sig
return fdef
def self_type(self) -> Instance:
return self_type(self.tf.type_context())
def make_scope(self) -> 'Scope':
return Scope(self.tf.type_map)
class Scope:
"""Maintain a temporary local scope during transformation."""
def __init__(self, type_map: Dict[Node, Type]) -> None:
self.names = {} # type: Dict[str, Var]
self.type_map = type_map
def add(self, name: str, type: Type) -> Var:
v = Var(name)
v.type = type
self.names[name] = v
return v
def name_expr(self, name: str) -> NameExpr:
nexpr = NameExpr(name)
nexpr.kind = nodes.LDEF
node = self.names[name]
nexpr.node = node
self.type_map[nexpr] = node.type
return nexpr
| [((52, 9, 52, 61), 'typing.Undefined', 'Undefined', ({(52, 19, 52, 60): '"""mypy.transform.DyncheckTransformVisitor"""'}, {}), "('mypy.transform.DyncheckTransformVisitor')", False, 'from typing import Undefined, List, Set, Any, cast, Tuple, Dict\n'), ((54, 14, 54, 40), 'typing.Undefined', 'Undefined', ({(54, 24, 54, 39): 'FuncTransformer'}, {}), '(FuncTransformer)', False, 'from typing import Undefined, List, Set, Any, cast, Tuple, Dict\n'), ((58, 23, 58, 42), 'mypy.transformfunc.FuncTransformer', 'FuncTransformer', ({(58, 39, 58, 41): 'tf'}, {}), '(tf)', False, 'from mypy.transformfunc import FuncTransformer\n'), ((110, 20, 110, 31), 'mypy.nodes.Block', 'Block', ({(110, 26, 110, 30): 'defs'}, {}), '(defs)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((216, 17, 216, 38), 'mypy.nodes.SuperExpr', 'SuperExpr', ({(216, 27, 216, 37): '"""__init__"""'}, {}), "('__init__')", False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((223, 19, 223, 34), 'mypy.semanal.self_type', 'self_type', ({(223, 29, 223, 33): 'info'}, {}), '(info)', False, 'from mypy.semanal import self_type\n'), ((230, 19, 230, 60), 'mypy.subtypes.map_instance_to_supertype', 'map_instance_to_supertype', ({(230, 45, 230, 53): 'selftype', (230, 55, 230, 59): 'base'}, {}), '(selftype, base)', False, 'from mypy.subtypes import map_instance_to_supertype\n'), ((244, 15, 244, 35), 'mypy.nodes.ExpressionStmt', 'ExpressionStmt', ({(244, 30, 244, 34): 'call'}, {}), '(call)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((289, 14, 289, 37), 'mypy.nodes.ReturnStmt', 'ReturnStmt', ({(289, 25, 289, 36): 'member_expr'}, {}), '(member_expr)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((292, 14, 292, 68), 'mypy.types.Callable', 'Callable', ({(292, 23, 292, 30): '[selft]', (292, 32, 292, 47): '[nodes.ARG_POS]', (292, 49, 292, 55): '[None]', (292, 57, 292, 60): 'typ', (292, 62, 292, 67): 'False'}, {}), '([selft], [nodes.ARG_POS], [None], typ, False)', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((316, 14, 316, 37), 'mypy.nodes.ReturnStmt', 'ReturnStmt', ({(316, 25, 316, 36): 'coerce_expr'}, {}), '(coerce_expr)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((341, 14, 341, 46), 'mypy.nodes.AssignmentStmt', 'AssignmentStmt', ({(341, 29, 341, 37): '[lvalue]', (341, 39, 341, 45): 'rvalue'}, {}), '([lvalue], rvalue)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((365, 20, 365, 34), 'mypy.nodes.NameExpr', 'NameExpr', ({(365, 29, 365, 33): 'name'}, {}), 
'(name)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((367, 14, 367, 46), 'mypy.nodes.AssignmentStmt', 'AssignmentStmt', ({(367, 29, 367, 37): '[lvalue]', (367, 39, 367, 45): 'rvalue'}, {}), '([lvalue], rvalue)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((387, 15, 387, 43), 'typing.cast', 'cast', ({(387, 20, 387, 28): 'NameExpr', (387, 30, 387, 42): 's.lvalues[0]'}, {}), '(NameExpr, s.lvalues[0])', False, 'from typing import Undefined, List, Set, Any, cast, Tuple, Dict\n'), ((471, 17, 471, 32), 'mypy.maptypevar.num_slots', 'num_slots', ({(471, 27, 471, 31): 'info'}, {}), '(info)', False, 'from mypy.maptypevar import num_slots, get_tvar_access_path\n'), ((527, 21, 527, 43), 'mypy.maptypevar.num_slots', 'num_slots', ({(527, 31, 527, 42): 'info.mro[1]'}, {}), '(info.mro[1])', False, 'from mypy.maptypevar import num_slots, get_tvar_access_path\n'), ((583, 21, 583, 49), 'mypy.compileslotmap.find_slot_origin', 'find_slot_origin', ({(583, 38, 583, 42): 'info', (583, 44, 583, 48): 'slot'}, {}), '(info, slot)', False, 'from mypy.compileslotmap import find_slot_origin\n'), ((587, 19, 587, 34), 'mypy.semanal.self_type', 'self_type', ({(587, 29, 587, 33): 'info'}, {}), '(info)', False, 'from mypy.semanal import self_type\n'), ((588, 19, 588, 62), 'mypy.subtypes.map_instance_to_supertype', 'map_instance_to_supertype', ({(588, 45, 588, 53): 'selftype', (588, 55, 588, 61): 'origin'}, {}), '(selftype, origin)', False, 'from mypy.subtypes import map_instance_to_supertype\n'), ((593, 15, 593, 56), 'mypy.rttypevars.translate_runtime_type_vars_locally', 'translate_runtime_type_vars_locally', ({(593, 51, 593, 55): 'tvar'}, {}), '(tvar)', False, 'from mypy.rttypevars import translate_runtime_type_vars_locally\n'), ((596, 15, 596, 29), 'mypy.nodes.TypeExpr', 'TypeExpr', ({(596, 24, 596, 28): 'tvar'}, {}), '(tvar)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((618, 12, 618, 31), 'mypy.nodes.NameExpr', 'NameExpr', ({(618, 21, 618, 30): 'tdef.name'}, {}), '(tdef.name)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((624, 15, 624, 43), 'mypy.nodes.CallExpr', 'CallExpr', ({(624, 24, 624, 25): 'n', (624, 27, 624, 31): 'args', (624, 33, 624, 42): 'arg_kinds'}, {}), '(n, args, arg_kinds)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((625, 14, 625, 30), 'mypy.nodes.ReturnStmt', 'ReturnStmt', ({(625, 25, 625, 29): 'call'}, {}), '(call)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((650, 12, 650, 21), 'mypy.nodes.Var', 'Var', ({(650, 16, 650, 20): 'name'}, {}), '(name)', False, 'from 
mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((656, 16, 656, 30), 'mypy.nodes.NameExpr', 'NameExpr', ({(656, 25, 656, 29): 'name'}, {}), '(name)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((154, 23, 154, 38), 'mypy.semanal.self_type', 'self_type', ({(154, 33, 154, 37): 'info'}, {}), '(info)', False, 'from mypy.semanal import self_type\n'), ((175, 26, 175, 62), 'mypy.transutil.prepend_arg_type', 'prepend_arg_type', ({(175, 43, 175, 54): 'callee_type', (175, 56, 175, 61): 'selft'}, {}), '(callee_type, selft)', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((183, 42, 184, 73), 'mypy.nodes.SymbolTableNode', 'SymbolTableNode', (), '', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((241, 33, 241, 67), 'mypy.nodes.method_callable', 'nodes.method_callable', ({(241, 55, 241, 66): 'callee_type'}, {}), '(callee_type)', False, 'from mypy import nodes\n'), ((270, 16, 270, 25), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((297, 23, 297, 35), 'mypy.nodes.Block', 'Block', ({(297, 29, 297, 34): '[ret]'}, {}), '([ret])', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((314, 42, 314, 51), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((319, 57, 319, 66), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((324, 23, 324, 35), 'mypy.nodes.Block', 'Block', ({(324, 29, 324, 34): '[ret]'}, {}), '([ret])', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((347, 23, 347, 29), 'mypy.types.Void', 'Void', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((352, 23, 352, 35), 'mypy.nodes.Block', 'Block', ({(352, 29, 352, 34): '[ret]'}, {}), '([ret])', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((364, 28, 364, 39), 'mypy.transutil.self_expr', 'self_expr', ({}, {}), '()', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((366, 40, 366, 49), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((374, 23, 374, 29), 'mypy.types.Void', 'Void', ({}, {}), '()', False, 'from 
mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((379, 23, 379, 35), 'mypy.nodes.Block', 'Block', ({(379, 29, 379, 34): '[ret]'}, {}), '([ret])', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((436, 26, 436, 37), 'mypy.nodes.Block', 'Block', ({(436, 32, 436, 36): 'defs'}, {}), '(defs)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((478, 16, 478, 37), 'mypy.nodes.SuperExpr', 'SuperExpr', ({(478, 26, 478, 36): '"""__init__"""'}, {}), "('__init__')", False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((495, 16, 495, 27), 'mypy.nodes.Var', 'Var', ({(495, 20, 495, 26): '"""self"""'}, {}), "('self')", False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((495, 29, 495, 39), 'mypy.nodes.Var', 'Var', ({(495, 33, 495, 38): '"""__o"""'}, {}), "('__o')", False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((508, 23, 508, 35), 'mypy.nodes.Block', 'Block', ({(508, 29, 508, 34): 'cdefs'}, {}), '(cdefs)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((541, 23, 541, 44), 'mypy.maptypevar.num_slots', 'num_slots', ({(541, 33, 541, 43): 'creat.info'}, {}), '(creat.info)', False, 'from mypy.maptypevar import num_slots, get_tvar_access_path\n'), ((607, 34, 607, 67), 'mypy.checkexpr.type_object_type', 'type_object_type', ({(607, 51, 607, 60): 'tdef.info', (607, 62, 607, 66): 'None'}, {}), '(tdef.info, None)', False, 'from mypy.checkexpr import type_object_type\n'), ((608, 34, 608, 68), 'mypy.erasetype.erase_typevars', 'erasetype.erase_typevars', ({(608, 59, 608, 67): 'type_sig'}, {}), '(type_sig)', False, 'from mypy import erasetype\n'), ((616, 31, 616, 40), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((631, 23, 631, 35), 'mypy.nodes.Block', 'Block', ({(631, 29, 631, 34): '[ret]'}, {}), '([ret])', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((92, 20, 92, 40), 'typing.cast', 'cast', ({(92, 25, 92, 28): 'Var', (92, 30, 92, 39): 'node.node'}, {}), '(Var, node.node)', False, 'from typing import Undefined, List, Set, Any, cast, Tuple, Dict\n'), ((105, 24, 105, 34), 'mypy.nodes.PassStmt', 'PassStmt', ({}, {}), '()', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, 
ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((155, 41, 157, 21), 'mypy.checkmember.analyse_member_access', 'analyse_member_access', ({(156, 16, 156, 26): '"""__init__"""', (156, 28, 156, 36): 'selftype', (156, 38, 156, 42): 'None', (156, 44, 156, 49): 'False', (156, 51, 156, 55): 'True', (156, 57, 156, 61): 'None', (156, 63, 156, 67): 'None', (157, 16, 157, 20): 'base'}, {}), "('__init__', selftype, None, False, True, None, None, base\n )", False, 'from mypy.checkmember import analyse_member_access\n'), ((164, 16, 164, 57), 'mypy.subtypes.map_instance_to_supertype', 'map_instance_to_supertype', ({(164, 42, 164, 50): 'selftype', (164, 52, 164, 56): 'base'}, {}), '(selftype, base)', False, 'from mypy.subtypes import map_instance_to_supertype\n'), ((169, 20, 169, 31), 'mypy.nodes.Var', 'Var', ({(169, 24, 169, 30): '"""self"""'}, {}), "('self')", False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((179, 28, 179, 37), 'mypy.nodes.Block', 'Block', ({(179, 34, 179, 36): '[]'}, {}), '([])', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((371, 31, 371, 40), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((376, 24, 376, 35), 'mypy.nodes.Var', 'Var', ({(376, 28, 376, 34): '"""self"""'}, {}), "('self')", False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((376, 37, 376, 46), 'mypy.nodes.Var', 'Var', ({(376, 41, 376, 45): 'name'}, {}), '(name)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((479, 21, 479, 36), 'mypy.nodes.NameExpr', 'NameExpr', ({(479, 30, 479, 35): '"""__o"""'}, {}), "('__o')", False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((480, 27, 480, 42), 'mypy.maptypevar.num_slots', 'num_slots', ({(480, 37, 480, 41): 'base'}, {}), '(base)', False, 'from mypy.maptypevar import num_slots, get_tvar_access_path\n'), ((482, 27, 482, 42), 'mypy.maptypevar.num_slots', 'num_slots', ({(482, 37, 482, 41): 'base'}, {}), '(base)', False, 'from mypy.maptypevar import num_slots, get_tvar_access_path\n'), ((485, 25, 485, 42), 'mypy.nodes.ExpressionStmt', 'ExpressionStmt', ({(485, 40, 485, 41): 'c'}, {}), '(c)', False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode\n'), ((492, 36, 492, 51), 'mypy.nodes.NameExpr', 'NameExpr', ({(492, 45, 492, 50): '"""__o"""'}, {}), "('__o')", False, 'from mypy.nodes import TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt, TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt, AssignmentStmt, TypeExpr, PassStmt, 
SymbolTableNode\n'), ((511, 32, 511, 38), 'mypy.types.Void', 'Void', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((547, 46, 547, 55), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((548, 42, 548, 51), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((558, 27, 558, 48), 'mypy.maptypevar.num_slots', 'num_slots', ({(558, 37, 558, 47): 'creat.info'}, {}), '(creat.info)', False, 'from mypy.maptypevar import num_slots, get_tvar_access_path\n'), ((530, 19, 530, 52), 'mypy.maptypevar.get_tvar_access_path', 'get_tvar_access_path', ({(530, 40, 530, 44): 'info', (530, 46, 530, 51): '(n + 1)'}, {}), '(info, n + 1)', False, 'from mypy.maptypevar import num_slots, get_tvar_access_path\n'), ((565, 50, 565, 59), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((566, 46, 566, 55), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((614, 33, 614, 42), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((461, 28, 461, 37), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((481, 38, 481, 58), 'mypy.transutil.tvar_arg_name', 'tvar_arg_name', ({(481, 52, 481, 57): '(n + 1)'}, {}), '(n + 1)', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((483, 38, 483, 69), 'mypy.transutil.tvar_arg_name', 'tvar_arg_name', ({(483, 52, 483, 57): '(n + 1)', (483, 59, 483, 68): 'BOUND_VAR'}, {}), '(n + 1, BOUND_VAR)', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((489, 41, 489, 52), 'mypy.transutil.self_expr', 'self_expr', ({}, {}), '()', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((500, 32, 500, 57), 'mypy.transutil.tvar_arg_name', 'tvar_arg_name', ({(500, 46, 500, 51): '(n + 1)', (500, 53, 500, 56): 'alt'}, {}), '(n + 1, alt)', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((509, 34, 509, 43), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n'), ((543, 46, 543, 57), 'mypy.transutil.self_expr', 'self_expr', ({}, {}), '()', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((544, 46, 544, 63), 'mypy.transutil.tvar_slot_name', 'tvar_slot_name', ({(544, 61, 544, 62): 'n'}, {}), '(n)', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((560, 44, 560, 66), 'mypy.transutil.tvar_slot_name', 'tvar_slot_name', ({(560, 59, 560, 60): 'n', (560, 62, 560, 65): 'alt'}, {}), '(n, alt)', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((562, 32, 562, 43), 'mypy.transutil.self_expr', 'self_expr', ({}, {}), 
'()', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((563, 32, 563, 54), 'mypy.transutil.tvar_slot_name', 'tvar_slot_name', ({(563, 47, 563, 48): 'n', (563, 50, 563, 53): 'alt'}, {}), '(n, alt)', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((532, 40, 532, 68), 'mypy.transutil.tvar_slot_name', 'tvar_slot_name', ({(532, 55, 532, 59): 'slot', (532, 61, 532, 67): 'is_alt'}, {}), '(slot, is_alt)', False, 'from mypy.transutil import self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type\n'), ((533, 40, 533, 49), 'mypy.types.AnyType', 'AnyType', ({}, {}), '()', False, 'from mypy.types import Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar, UnboundType\n')] |
AhmadManzoor/jazzpos | jazzpos/admin.py | 7b771095b8df52d036657f33f36a97efb575d36c | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django_tablib.admin import TablibAdmin
from jazzpos.models import Customer, Patient, Store, CustomerType, StoreSettings
from jazzpos.models import UserProfile
class CustomerAdmin(TablibAdmin):
formats = ['xls', 'csv',]
class PatientAdmin(TablibAdmin):
formats = ['xls', 'csv',]
class StoreAdmin(admin.ModelAdmin):
pass
class StoreSettingsAdmin(admin.ModelAdmin):
pass
class CustomerTypeAdmin(admin.ModelAdmin):
pass
class UserProfileInline(admin.StackedInline):
model = UserProfile
UserAdmin.inlines = [UserProfileInline,]
admin.site.register(Customer, CustomerAdmin)
admin.site.register(Patient, PatientAdmin)
admin.site.register(Store, StoreAdmin)
admin.site.register(StoreSettings, StoreSettingsAdmin)
admin.site.register(CustomerType, CustomerTypeAdmin)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| [((30, 0, 30, 44), 'django.contrib.admin.site.register', 'admin.site.register', ({(30, 20, 30, 28): 'Customer', (30, 30, 30, 43): 'CustomerAdmin'}, {}), '(Customer, CustomerAdmin)', False, 'from django.contrib import admin\n'), ((31, 0, 31, 42), 'django.contrib.admin.site.register', 'admin.site.register', ({(31, 20, 31, 27): 'Patient', (31, 29, 31, 41): 'PatientAdmin'}, {}), '(Patient, PatientAdmin)', False, 'from django.contrib import admin\n'), ((32, 0, 32, 38), 'django.contrib.admin.site.register', 'admin.site.register', ({(32, 20, 32, 25): 'Store', (32, 27, 32, 37): 'StoreAdmin'}, {}), '(Store, StoreAdmin)', False, 'from django.contrib import admin\n'), ((33, 0, 33, 54), 'django.contrib.admin.site.register', 'admin.site.register', ({(33, 20, 33, 33): 'StoreSettings', (33, 35, 33, 53): 'StoreSettingsAdmin'}, {}), '(StoreSettings, StoreSettingsAdmin)', False, 'from django.contrib import admin\n'), ((34, 0, 34, 52), 'django.contrib.admin.site.register', 'admin.site.register', ({(34, 20, 34, 32): 'CustomerType', (34, 34, 34, 51): 'CustomerTypeAdmin'}, {}), '(CustomerType, CustomerTypeAdmin)', False, 'from django.contrib import admin\n'), ((36, 0, 36, 27), 'django.contrib.admin.site.unregister', 'admin.site.unregister', ({(36, 22, 36, 26): 'User'}, {}), '(User)', False, 'from django.contrib import admin\n'), ((37, 0, 37, 36), 'django.contrib.admin.site.register', 'admin.site.register', ({(37, 20, 37, 24): 'User', (37, 26, 37, 35): 'UserAdmin'}, {}), '(User, UserAdmin)', False, 'from django.contrib import admin\n')] |
vfdev-5/ignite-examples | classification/imaterialist_challenge_furniture_2018/configs/train/train_inceptionresnetv2_350_ssd_like_v3.py | fb15b59e2b159e1e2bc4628f8756055e9154f5c8 | # Basic training configuration file
from torch.optim import RMSprop
from torch.optim.lr_scheduler import MultiStepLR
from torchvision.transforms import RandomHorizontalFlip, Compose
from torchvision.transforms import RandomResizedCrop, RandomAffine, RandomApply
from torchvision.transforms import ColorJitter, ToTensor, Normalize
from common.dataset import FilesFromCsvDataset
from common.data_loaders import get_data_loader
from models.inceptionresnetv2_ssd_like import FurnitureInceptionResNetV4350SSDLike_v3
SEED = 17
DEBUG = True
DEVICE = 'cuda'
OUTPUT_PATH = "output"
size = 350
TRAIN_TRANSFORMS = Compose([
RandomApply(
[RandomAffine(degrees=10, resample=3, fillcolor=(255, 255, 255)), ],
p=0.5
),
RandomResizedCrop(size, scale=(0.7, 1.0), interpolation=3),
RandomHorizontalFlip(p=0.5),
ColorJitter(hue=0.12, brightness=0.12),
ToTensor(),
Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
VAL_TRANSFORMS = TRAIN_TRANSFORMS
BATCH_SIZE = 24
NUM_WORKERS = 15
dataset = FilesFromCsvDataset("output/unique_filtered_train_dataset.csv")
TRAIN_LOADER = get_data_loader(dataset,
data_transform=TRAIN_TRANSFORMS,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
pin_memory='cuda' in DEVICE)
val_dataset = FilesFromCsvDataset("output/unique_filtered_val_dataset.csv")
VAL_LOADER = get_data_loader(val_dataset,
data_transform=VAL_TRANSFORMS,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
pin_memory='cuda' in DEVICE)
MODEL = FurnitureInceptionResNetV4350SSDLike_v3(num_classes=128, pretrained='imagenet')
N_EPOCHS = 100
OPTIM = RMSprop(
params=[
{"params": MODEL.extractor.stem.parameters(), 'lr': 0.0001},
{"params": MODEL.extractor.low_features_a.parameters(), 'lr': 0.00045},
{"params": MODEL.extractor.low_features_b.parameters(), 'lr': 0.00045},
{"params": MODEL.extractor.mid_features.parameters(), 'lr': 0.0045},
{"params": MODEL.extractor.top_features.parameters(), 'lr': 0.0045},
{"params": MODEL.extractor.smooth_layers.parameters(), 'lr': 0.045},
{"params": MODEL.cls_layers.parameters(), 'lr': 0.045},
{"params": MODEL.boxes_to_classes.parameters(), 'lr': 0.045},
{"params": MODEL.final_classifier.parameters(), 'lr': 0.045},
],
alpha=0.9,
eps=1.0
)
LR_SCHEDULERS = [
MultiStepLR(OPTIM, milestones=[4, 5, 6, 7, 8, 10, 11, 13, 14, 15], gamma=0.5),
]
EARLY_STOPPING_KWARGS = {
'patience': 25,
# 'score_function': None
}
LOG_INTERVAL = 100
| [((40, 10, 40, 73), 'common.dataset.FilesFromCsvDataset', 'FilesFromCsvDataset', ({(40, 30, 40, 72): '"""output/unique_filtered_train_dataset.csv"""'}, {}), "('output/unique_filtered_train_dataset.csv')", False, 'from common.dataset import FilesFromCsvDataset\n'), ((41, 15, 45, 59), 'common.data_loaders.get_data_loader', 'get_data_loader', (), '', False, 'from common.data_loaders import get_data_loader\n'), ((48, 14, 48, 75), 'common.dataset.FilesFromCsvDataset', 'FilesFromCsvDataset', ({(48, 34, 48, 74): '"""output/unique_filtered_val_dataset.csv"""'}, {}), "('output/unique_filtered_val_dataset.csv')", False, 'from common.dataset import FilesFromCsvDataset\n'), ((49, 13, 53, 57), 'common.data_loaders.get_data_loader', 'get_data_loader', (), '', False, 'from common.data_loaders import get_data_loader\n'), ((56, 8, 56, 87), 'models.inceptionresnetv2_ssd_like.FurnitureInceptionResNetV4350SSDLike_v3', 'FurnitureInceptionResNetV4350SSDLike_v3', (), '', False, 'from models.inceptionresnetv2_ssd_like import FurnitureInceptionResNetV4350SSDLike_v3\n'), ((80, 4, 80, 81), 'torch.optim.lr_scheduler.MultiStepLR', 'MultiStepLR', (), '', False, 'from torch.optim.lr_scheduler import MultiStepLR\n'), ((25, 4, 25, 62), 'torchvision.transforms.RandomResizedCrop', 'RandomResizedCrop', (), '', False, 'from torchvision.transforms import RandomResizedCrop, RandomAffine, RandomApply\n'), ((26, 4, 26, 31), 'torchvision.transforms.RandomHorizontalFlip', 'RandomHorizontalFlip', (), '', False, 'from torchvision.transforms import RandomHorizontalFlip, Compose\n'), ((27, 4, 27, 42), 'torchvision.transforms.ColorJitter', 'ColorJitter', (), '', False, 'from torchvision.transforms import ColorJitter, ToTensor, Normalize\n'), ((28, 4, 28, 14), 'torchvision.transforms.ToTensor', 'ToTensor', ({}, {}), '()', False, 'from torchvision.transforms import ColorJitter, ToTensor, Normalize\n'), ((29, 4, 29, 56), 'torchvision.transforms.Normalize', 'Normalize', (), '', False, 'from torchvision.transforms import ColorJitter, ToTensor, Normalize\n'), ((22, 9, 22, 72), 'torchvision.transforms.RandomAffine', 'RandomAffine', (), '', False, 'from torchvision.transforms import RandomResizedCrop, RandomAffine, RandomApply\n')] |
QuESt-Calculator/pyscf | examples/qmmm/02-mcscf.py | 0ed03633b699505c7278f1eb501342667d0aa910 | #!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
'''
A simple example to run MCSCF with background charges.
'''
import numpy
from pyscf import gto, scf, mcscf, qmmm
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g',
verbose=4)
numpy.random.seed(1)
coords = numpy.random.random((5,3)) * 10
charges = (numpy.arange(5) + 1.) * -.1
#
# There are two ways to add background charges to MCSCF method.
# The recommended one is to initialize it in SCF calculation. The MCSCF
# calculation takes the information from SCF objects.
#
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges).run()
mc = mcscf.CASSCF(mf, 6, 6)
mc.run()
mc = mcscf.CASCI(mf, 6, 6)
mc.run()
#
# The other method is to patch the MCSCF object with the background charges.
# Note: it updates the underlying SCF object inplace.
#
mo_init = mf.mo_coeff
mf = scf.RHF(mol)
mc = mcscf.CASSCF(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
mf = scf.RHF(mol)
mc = mcscf.CASCI(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
| [((13, 6, 25, 22), 'pyscf.gto.M', 'gto.M', (), '', False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((27, 0, 27, 20), 'numpy.random.seed', 'numpy.random.seed', ({(27, 18, 27, 19): '(1)'}, {}), '(1)', False, 'import numpy\n'), ((38, 5, 38, 27), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', ({(38, 18, 38, 20): 'mf', (38, 22, 38, 23): '6', (38, 25, 38, 26): '6'}, {}), '(mf, 6, 6)', False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((41, 5, 41, 26), 'pyscf.mcscf.CASCI', 'mcscf.CASCI', ({(41, 17, 41, 19): 'mf', (41, 21, 41, 22): '6', (41, 24, 41, 25): '6'}, {}), '(mf, 6, 6)', False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((50, 5, 50, 17), 'pyscf.scf.RHF', 'scf.RHF', ({(50, 13, 50, 16): 'mol'}, {}), '(mol)', False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((51, 5, 51, 27), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', ({(51, 18, 51, 20): 'mf', (51, 22, 51, 23): '6', (51, 25, 51, 26): '6'}, {}), '(mf, 6, 6)', False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((52, 5, 52, 40), 'pyscf.qmmm.mm_charge', 'qmmm.mm_charge', ({(52, 20, 52, 22): 'mc', (52, 24, 52, 30): 'coords', (52, 32, 52, 39): 'charges'}, {}), '(mc, coords, charges)', False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((55, 5, 55, 17), 'pyscf.scf.RHF', 'scf.RHF', ({(55, 13, 55, 16): 'mol'}, {}), '(mol)', False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((56, 5, 56, 26), 'pyscf.mcscf.CASCI', 'mcscf.CASCI', ({(56, 17, 56, 19): 'mf', (56, 21, 56, 22): '6', (56, 24, 56, 25): '6'}, {}), '(mf, 6, 6)', False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((57, 5, 57, 40), 'pyscf.qmmm.mm_charge', 'qmmm.mm_charge', ({(57, 20, 57, 22): 'mc', (57, 24, 57, 30): 'coords', (57, 32, 57, 39): 'charges'}, {}), '(mc, coords, charges)', False, 'from pyscf import gto, scf, mcscf, qmmm\n'), ((28, 9, 28, 35), 'numpy.random.random', 'numpy.random.random', ({(28, 29, 28, 34): '(5, 3)'}, {}), '((5, 3))', False, 'import numpy\n'), ((29, 11, 29, 26), 'numpy.arange', 'numpy.arange', ({(29, 24, 29, 25): '(5)'}, {}), '(5)', False, 'import numpy\n'), ((36, 20, 36, 32), 'pyscf.scf.RHF', 'scf.RHF', ({(36, 28, 36, 31): 'mol'}, {}), '(mol)', False, 'from pyscf import gto, scf, mcscf, qmmm\n')] |
uk-gov-mirror/ministryofjustice.money-to-prisoners-send-money | mtp_send_money/apps/send_money/utils.py | 80db0cf5f384f93d35387a757605cfddbc98935f | import datetime
from decimal import Decimal, ROUND_DOWN, ROUND_UP
import logging
import re
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.utils import formats
from django.utils.cache import patch_cache_control
from django.utils.dateformat import format as format_date
from django.utils.dateparse import parse_date
from django.utils.encoding import force_text
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
from mtp_common.auth import api_client, urljoin
import requests
from requests.exceptions import Timeout
logger = logging.getLogger('mtp')
prisoner_number_re = re.compile(r'^[a-z]\d\d\d\d[a-z]{2}$', re.IGNORECASE)
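# The pattern above matches prisoner numbers of the form 'A1234BC':
# one letter, four digits, two letters (matched case-insensitively).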
def get_api_session():
return api_client.get_authenticated_api_session(
settings.SHARED_API_USERNAME,
settings.SHARED_API_PASSWORD,
)
def check_payment_service_available():
# service is deemed unavailable only if status is explicitly false, not if it cannot be determined
try:
response = requests.get(api_url('/service-availability/'), timeout=5)
gov_uk_status = response.json().get('gov_uk_pay', {})
return gov_uk_status.get('status', True), gov_uk_status.get('message_to_users')
except (Timeout, ValueError):
return True, None
def validate_prisoner_number(value):
if not prisoner_number_re.match(value):
raise ValidationError(_('Incorrect prisoner number format'), code='invalid')
class RejectCardNumberValidator(RegexValidator):
regex = r'\d{4}\s*\d{4}\s*\d{4}\s*\d{4}'
inverse_match = True
code = 'card_number'
message = _('Please do not enter your debit card number here')
def format_percentage(number, decimals=1, trim_zeros=True):
if not isinstance(number, Decimal):
number = Decimal(number)
percentage_text = ('{0:.%sf}' % decimals).format(number)
if decimals and trim_zeros and percentage_text.endswith('.' + ('0' * decimals)):
percentage_text = percentage_text[:-decimals - 1]
return percentage_text + '%'
def currency_format(amount, trim_empty_pence=False):
"""
Formats a number into currency format
@param amount: amount in pounds
@param trim_empty_pence: if True, strip off .00
"""
if not isinstance(amount, Decimal):
amount = unserialise_amount(amount)
text_amount = serialise_amount(amount)
if trim_empty_pence and text_amount.endswith('.00'):
text_amount = text_amount[:-3]
return '£' + text_amount
def currency_format_pence(amount, trim_empty_pence=False):
"""
Formats a number into currency format display pence only as #p
@param amount: amount in pounds
@param trim_empty_pence: if True, strip off .00
"""
if not isinstance(amount, Decimal):
amount = unserialise_amount(amount)
if amount.__abs__() < Decimal('1'):
return '%sp' % (amount * Decimal('100')).to_integral_value()
return currency_format(amount, trim_empty_pence=trim_empty_pence)
def clamp_amount(amount):
"""
Round the amount to integer pence,
rounding fractional pence up (away from zero) for any fractional pence value
that is greater than or equal to a tenth of a penny.
@param amount: Decimal amount to round
"""
tenths_of_pennies = (amount * Decimal('1000')).to_integral_value(rounding=ROUND_DOWN)
pounds = tenths_of_pennies / Decimal('1000')
return pounds.quantize(Decimal('1.00'), rounding=ROUND_UP)
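# Examples of the rounding behaviour (illustrative, assuming Decimal inputs):
#   clamp_amount(Decimal('10.001'))  -> Decimal('10.01')   # a tenth of a penny or more rounds up
#   clamp_amount(Decimal('10.0009')) -> Decimal('10.00')   # less than a tenth of a penny is dropped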
def get_service_charge(amount, clamp=True):
if not isinstance(amount, Decimal):
amount = Decimal(amount)
percentage_charge = amount * settings.SERVICE_CHARGE_PERCENTAGE / Decimal('100')
service_charge = percentage_charge + settings.SERVICE_CHARGE_FIXED
if clamp:
return clamp_amount(service_charge)
return service_charge
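# Illustrative example; the real rates come from Django settings, and the values
# below are assumed only for the sake of the example:
#   with SERVICE_CHARGE_PERCENTAGE = Decimal('2.4') and SERVICE_CHARGE_FIXED = Decimal('0.20'),
#   get_service_charge('10.00') == Decimal('0.44')   # 10.00 * 2.4% + 0.20, clamped to whole pence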
def get_total_charge(amount, clamp=True):
if not isinstance(amount, Decimal):
amount = Decimal(amount)
charge = get_service_charge(amount, clamp=False)
result = amount + charge
if clamp:
return clamp_amount(result)
return result
def serialise_amount(amount):
return '{0:.2f}'.format(amount)
def unserialise_amount(amount_text):
amount_text = force_text(amount_text)
return Decimal(amount_text)
def serialise_date(date):
return format_date(date, 'Y-m-d')
def unserialise_date(date_text):
date_text = force_text(date_text)
date = parse_date(date_text)
if not date:
raise ValueError('Invalid date')
return date
def lenient_unserialise_date(date_text):
date_text = force_text(date_text)
date_formats = formats.get_format('DATE_INPUT_FORMATS')
for date_format in date_formats:
try:
return datetime.datetime.strptime(date_text, date_format).date()
except (ValueError, TypeError):
continue
raise ValueError('Invalid date')
def govuk_headers():
return {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % settings.GOVUK_PAY_AUTH_TOKEN
}
def govuk_url(path):
return urljoin(settings.GOVUK_PAY_URL, path)
def api_url(path):
return urljoin(settings.API_URL, path)
def site_url(path):
return urljoin(settings.SITE_URL, path)
def get_link_by_rel(data, rel):
if rel in data['_links']:
return data['_links'][rel]['href']
def make_response_cacheable(response):
"""
Allow response to be public and cached for an hour
"""
patch_cache_control(response, public=True, max_age=3600)
return response
class CacheableTemplateView(TemplateView):
"""
For simple pages whose content rarely changes so can be cached for an hour
"""
def get(self, request, *args, **kwargs):
response = super().get(request, *args, **kwargs)
return make_response_cacheable(response)
| [((20, 9, 20, 33), 'logging.getLogger', 'logging.getLogger', ({(20, 27, 20, 32): '"""mtp"""'}, {}), "('mtp')", False, 'import logging\n'), ((21, 21, 21, 74), 're.compile', 're.compile', ({(21, 32, 21, 58): '"""^[a-z]\\\\d\\\\d\\\\d\\\\d[a-z]{2}$"""', (21, 60, 21, 73): 're.IGNORECASE'}, {}), "('^[a-z]\\\\d\\\\d\\\\d\\\\d[a-z]{2}$', re.IGNORECASE)", False, 'import re\n'), ((25, 11, 28, 5), 'mtp_common.auth.api_client.get_authenticated_api_session', 'api_client.get_authenticated_api_session', ({(26, 8, 26, 36): 'settings.SHARED_API_USERNAME', (27, 8, 27, 36): 'settings.SHARED_API_PASSWORD'}, {}), '(settings.SHARED_API_USERNAME,\n settings.SHARED_API_PASSWORD)', False, 'from mtp_common.auth import api_client, urljoin\n'), ((50, 14, 50, 66), 'django.utils.translation.gettext_lazy', '_', ({(50, 16, 50, 65): '"""Please do not enter your debit card number here"""'}, {}), "('Please do not enter your debit card number here')", True, 'from django.utils.translation import gettext_lazy as _\n'), ((126, 18, 126, 41), 'django.utils.encoding.force_text', 'force_text', ({(126, 29, 126, 40): 'amount_text'}, {}), '(amount_text)', False, 'from django.utils.encoding import force_text\n'), ((127, 11, 127, 31), 'decimal.Decimal', 'Decimal', ({(127, 19, 127, 30): 'amount_text'}, {}), '(amount_text)', False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((131, 11, 131, 37), 'django.utils.dateformat.format', 'format_date', ({(131, 23, 131, 27): 'date', (131, 29, 131, 36): '"""Y-m-d"""'}, {}), "(date, 'Y-m-d')", True, 'from django.utils.dateformat import format as format_date\n'), ((135, 16, 135, 37), 'django.utils.encoding.force_text', 'force_text', ({(135, 27, 135, 36): 'date_text'}, {}), '(date_text)', False, 'from django.utils.encoding import force_text\n'), ((136, 11, 136, 32), 'django.utils.dateparse.parse_date', 'parse_date', ({(136, 22, 136, 31): 'date_text'}, {}), '(date_text)', False, 'from django.utils.dateparse import parse_date\n'), ((143, 16, 143, 37), 'django.utils.encoding.force_text', 'force_text', ({(143, 27, 143, 36): 'date_text'}, {}), '(date_text)', False, 'from django.utils.encoding import force_text\n'), ((144, 19, 144, 59), 'django.utils.formats.get_format', 'formats.get_format', ({(144, 38, 144, 58): '"""DATE_INPUT_FORMATS"""'}, {}), "('DATE_INPUT_FORMATS')", False, 'from django.utils import formats\n'), ((162, 11, 162, 48), 'mtp_common.auth.urljoin', 'urljoin', ({(162, 19, 162, 41): 'settings.GOVUK_PAY_URL', (162, 43, 162, 47): 'path'}, {}), '(settings.GOVUK_PAY_URL, path)', False, 'from mtp_common.auth import api_client, urljoin\n'), ((166, 11, 166, 42), 'mtp_common.auth.urljoin', 'urljoin', ({(166, 19, 166, 35): 'settings.API_URL', (166, 37, 166, 41): 'path'}, {}), '(settings.API_URL, path)', False, 'from mtp_common.auth import api_client, urljoin\n'), ((170, 11, 170, 43), 'mtp_common.auth.urljoin', 'urljoin', ({(170, 19, 170, 36): 'settings.SITE_URL', (170, 38, 170, 42): 'path'}, {}), '(settings.SITE_URL, path)', False, 'from mtp_common.auth import api_client, urljoin\n'), ((182, 4, 182, 60), 'django.utils.cache.patch_cache_control', 'patch_cache_control', (), '', False, 'from django.utils.cache import patch_cache_control\n'), ((55, 17, 55, 32), 'decimal.Decimal', 'Decimal', ({(55, 25, 55, 31): 'number'}, {}), '(number)', False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((84, 26, 84, 38), 'decimal.Decimal', 'Decimal', ({(84, 34, 84, 37): '"""1"""'}, {}), "('1')", False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((97, 33, 97, 48), 'decimal.Decimal', 
'Decimal', ({(97, 41, 97, 47): '"""1000"""'}, {}), "('1000')", False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((98, 27, 98, 42), 'decimal.Decimal', 'Decimal', ({(98, 35, 98, 41): '"""1.00"""'}, {}), "('1.00')", False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((103, 17, 103, 32), 'decimal.Decimal', 'Decimal', ({(103, 25, 103, 31): 'amount'}, {}), '(amount)', False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((104, 70, 104, 84), 'decimal.Decimal', 'Decimal', ({(104, 78, 104, 83): '"""100"""'}, {}), "('100')", False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((113, 17, 113, 32), 'decimal.Decimal', 'Decimal', ({(113, 25, 113, 31): 'amount'}, {}), '(amount)', False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((43, 30, 43, 67), 'django.utils.translation.gettext_lazy', '_', ({(43, 32, 43, 66): '"""Incorrect prisoner number format"""'}, {}), "('Incorrect prisoner number format')", True, 'from django.utils.translation import gettext_lazy as _\n'), ((96, 34, 96, 49), 'decimal.Decimal', 'Decimal', ({(96, 42, 96, 48): '"""1000"""'}, {}), "('1000')", False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n'), ((147, 19, 147, 69), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(147, 46, 147, 55): 'date_text', (147, 57, 147, 68): 'date_format'}, {}), '(date_text, date_format)', False, 'import datetime\n'), ((85, 33, 85, 47), 'decimal.Decimal', 'Decimal', ({(85, 41, 85, 46): '"""100"""'}, {}), "('100')", False, 'from decimal import Decimal, ROUND_DOWN, ROUND_UP\n')] |
Zumbi-ML/zmbRELEV | src/zmbrelev/config.py | e6a6f789804d7230415f390da905e94ae2ab27f5 | # -*- coding: UTF-8 -*-
import os
this_file_path = os.path.dirname(os.path.realpath(__file__))
MODELS_DIR = os.path.join(this_file_path, "models/")
| [((7, 13, 7, 52), 'os.path.join', 'os.path.join', ({(7, 26, 7, 40): 'this_file_path', (7, 42, 7, 51): '"""models/"""'}, {}), "(this_file_path, 'models/')", False, 'import os\n'), ((5, 33, 5, 59), 'os.path.realpath', 'os.path.realpath', ({(5, 50, 5, 58): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
EdgarVallejo96/pyEdureka | ArraysP2.py | f103f67ed4f9eee6ab924237e9d94a489e602c7c | import array as arr
a = arr.array('i', [ 1,2,3,4,5,6])
print(a)
# Accessing elements
print(a[2])
print(a[-2])
# BASIC ARRAY OPERATIONS
# Find length of array
print()
print('Length of array')
print(len(a))
# Adding elments to an array
# append() to add a single element at the end of an array
# extend() to add more than one element at the end of an array
# insert() to add an element at a specific position in an array
print()
# append
print('Append')
a.append(8)
print(a)
# extend
print()
print('Extend')
a.extend([9,8,6,5,4])
print(a)
# insert
print()
print('Insert')
a.insert(2,6) # first param is the index, second param is the value
print(a)
# Removing elements from an array
# pop() Remove an element and return it
# remove() Remove element with a specific value without returning it
print()
print(a)
# pop
print('pop')
print(a.pop()) # removes last element
print(a)
print(a.pop(2))
print(a)
print(a.pop(-1))
print(a)
# remove
print()
print('remove')
print(a.remove(8)) # remove() returns None rather than the removed value; it removes the first occurrence of 8
print(a)
# Array Concatenation
print()
print('Array Concatenation')
b = arr.array('i', [1,2,3,4,5,6,7])
c = arr.array('i', [3,4,2,1,3,5,6,7,8])
d = arr.array('i')
d = b + c
print(d)
# Slicing an Array
print()
print('Slicing an Array') # This means fetching some particular values from an array
print(d)
print(d[0:5]) # Doesn't include the value on the right index
print(d[0:-2])
print(d[::-1]) # Reverse the array, this method is not preferred because it exauhsts the memory
# Looping through an Array
print()
print('Looping through an Array')
print('Using for')
for x in d:
print(x, end=' ')
print()
for x in d[0:-3]:
print(x, end=' ')
print()
print('Using while')
temp = 0
while temp < d[2]:
print(d[temp], end = ' ')
temp = temp + 1 # Can use temp+=1, it's the same thing
print()
print(a)
tem = 0
while tem < len(a):
print(a[tem], end=' ')
tem += 1
print()
| [((2, 4, 2, 34), 'array.array', 'arr.array', ({(2, 14, 2, 17): '"""i"""', (2, 19, 2, 33): '[1, 2, 3, 4, 5, 6]'}, {}), "('i', [1, 2, 3, 4, 5, 6])", True, 'import array as arr\n'), ((62, 4, 62, 35), 'array.array', 'arr.array', ({(62, 14, 62, 17): '"""i"""', (62, 19, 62, 34): '[1, 2, 3, 4, 5, 6, 7]'}, {}), "('i', [1, 2, 3, 4, 5, 6, 7])", True, 'import array as arr\n'), ((63, 4, 63, 39), 'array.array', 'arr.array', ({(63, 14, 63, 17): '"""i"""', (63, 19, 63, 38): '[3, 4, 2, 1, 3, 5, 6, 7, 8]'}, {}), "('i', [3, 4, 2, 1, 3, 5, 6, 7, 8])", True, 'import array as arr\n'), ((64, 4, 64, 18), 'array.array', 'arr.array', ({(64, 14, 64, 17): '"""i"""'}, {}), "('i')", True, 'import array as arr\n')] |
RichardLitt/Vesper | vesper/mpg_ranch/nfc_detector_low_score_classifier_1_0/classifier.py | 5360844f42a06942e7684121c650b08cf8616285 | """
Module containing low score classifier for MPG Ranch NFC detectors.
An instance of the `Classifier` class of this module assigns the `LowScore`
classification to a clip if the clip has no `Classification` annotation and
has a `DetectorScore` annotation whose value is less than a threshold.
This classifier is intended for use on clips created by the
MPG Ranch Thrush Detector 1.0 and the MPG Ranch Tseep Detector 1.0.
"""
import logging
from vesper.command.annotator import Annotator
from vesper.django.app.models import AnnotationInfo, StringAnnotation
_logger = logging.getLogger()
_SCORE_THRESHOLDS = {
# For 50 percent precision on validation recordings.
'MPG Ranch Thrush Detector 1.0 40': 70,
'MPG Ranch Tseep Detector 1.0 20': 41,
# For 75 percent precision on validation recordings.
# 'MPG Ranch Thrush Detector 1.0 40': 91,
# 'MPG Ranch Tseep Detector 1.0 20': 63,
}
class Classifier(Annotator):
extension_name = 'MPG Ranch NFC Detector Low Score Classifier 1.0'
def __init__(
self, annotation_info, creating_user=None, creating_job=None,
creating_processor=None):
super().__init__(
annotation_info, creating_user, creating_job, creating_processor)
self._score_annotation_info = _get_annotation_info('Detector Score')
self._score_thresholds = _SCORE_THRESHOLDS
def annotate(self, clip):
annotated = False
classification = self._get_annotation_value(clip)
if classification is None:
# clip is unclassified
score = self._get_score(clip)
if score is not None:
# clip has a detector score
threshold = self._get_score_threshold(clip)
if threshold is not None and score < threshold:
# detector score is below threshold
self._annotate(clip, 'LowScore')
annotated = True
return annotated
def _get_score(self, clip):
try:
annotation = StringAnnotation.objects.get(
clip=clip, info=self._score_annotation_info)
except StringAnnotation.DoesNotExist:
return None
else:
return float(annotation.value)
def _get_score_threshold(self, clip):
detector = clip.creating_processor
if detector is None:
return None
else:
return self._score_thresholds.get(detector.name)
def _get_annotation_info(name):
try:
return AnnotationInfo.objects.get(name=name)
except AnnotationInfo.DoesNotExist:
raise ValueError(
'Unrecognized annotation "{}".'.format(name))
| [((19, 10, 19, 29), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((98, 15, 98, 52), 'vesper.django.app.models.AnnotationInfo.objects.get', 'AnnotationInfo.objects.get', (), '', False, 'from vesper.django.app.models import AnnotationInfo, StringAnnotation\n'), ((80, 25, 81, 60), 'vesper.django.app.models.StringAnnotation.objects.get', 'StringAnnotation.objects.get', (), '', False, 'from vesper.django.app.models import AnnotationInfo, StringAnnotation\n')] |
yitzikc/athena2pd | setup.py | d2d6b886a70e958f51d90103600572152eaa7bb9 | from setuptools import setup, find_packages
def find_version(path):
import re
# path shall be a plain ASCII text file
s = open(path, 'rt').read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", s, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Version not found')
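# find_version() expects the target file to contain a line such as:
#   __version__ = '0.1.0'
# (single or double quotes both satisfy the regular expression above).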
def get_requirements(filename):
with open(filename, 'r') as fh:
return [l.strip() for l in fh]
def get_long_desc(filename):
with open(filename, 'r') as fh:
return fh.read()
setup(
name='athena2pd',
packages=['athena2pd'],
version=find_version('athena2pd/__init__.py'),
description='Helps simplify access to databases stored in Amazon Athena using SQL and pandas DataFrames.',
long_description=get_long_desc('README.md'),
long_description_content_type='text/markdown',
author='Joe Dementri',
maintainer='Joe Dementri',
maintainer_email='[email protected]',
license='MIT',
install_requires=get_requirements('requirements.txt'),
zip_safe=False,
url='https://github.com/joedementri/athena2pd',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent'
],
python_requires='>=2.7,>=3.6'
) | [((8, 20, 8, 77), 're.search', 're.search', ({(8, 30, 8, 67): '"""^__version__ = [\'\\\\"]([^\'\\\\"]*)[\'\\\\"]"""', (8, 69, 8, 70): 's', (8, 72, 8, 76): 're.M'}, {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]*)[\\\'\\\\"]\', s, re.M)', False, 'import re\n')] |
PuAnysh/UFPMP-Det | mmdet/core/ufp/__init__.py | 6809b4f8de3aa1d013a3f86114bc3e8496d896a9 | from .spp import *
from .unified_foreground_packing import *
__all__ = [
'phsppog', 'UnifiedForegroundPacking'
]
| [] |