repo_name | repo_path | repo_head_hexsha | content | apis |
---|---|---|---|---|
Enflow-io/dhf-pay-python | dhf_wrapper/base_client.py | 7c32461d3b2a5018151b2a16a0cc0ad6850b88b1 | from typing import Optional, Callable
import requests
from requests.auth import AuthBase
from requests.exceptions import RequestException
class BearerAuth(AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers['Authorization'] = f'Bearer {self.token}'
return r
class ServiceClient:
DEFAULT_MAX_RETRIES = 0
def __init__(
self,
base_url: str,
token: Optional[str] = None,
):
self.base_url = base_url.rstrip("/")
self.token = token
self.session = self._create_client_session()
def _dispose(self):
"""
        Close the client's HTTP session
"""
self.session.close()
def _create_client_session(self):
"""
        Create and configure the underlying requests session
"""
session = requests.Session()
session.auth = self._get_http_auth()
return session
def _get_http_auth(self):
"""
        Resolve HTTP authentication for the configured token, if any
"""
if self.token:
return BearerAuth(self.token)
def make_full_url(self, path: str) -> str:
"""
        Join the base URL with the given path
:param path: str
:return: str
"""
return f"{self.base_url}{path}"
def _make_request(self, request: Callable, retries=DEFAULT_MAX_RETRIES, **kwargs) -> dict:
"""
        Make the request, retrying up to `retries` times on server (5xx) errors
:param request: Callable
:return: dict
"""
try:
with request(**kwargs) as resp:
resp.raise_for_status()
return resp.json()
except RequestException as e:
            if retries > 0 and e.response is not None and e.response.status_code >= 500:
return self._make_request(request=request, retries=retries - 1, **kwargs)
else:
raise e
| [((39, 18, 39, 36), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import requests\n')] |
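A quick usage sketch for the client above (the base URL, token and endpoint are hypothetical; note that `_make_request` only retries when the server answers with a 5xx status):

client = ServiceClient(base_url="https://api.example.com", token="secret-token")
try:
    payload = client._make_request(
        client.session.get,                     # any bound session method works
        retries=3,                              # up to 3 retries on 5xx answers
        url=client.make_full_url("/payments"),  # hypothetical endpoint
        timeout=10,
    )
    print(payload)
finally:
    client._dispose()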
Edinburgh-Genome-Foundry/Flametree | flametree/utils.py | a189de5d83ca1eb3526a439320e41df9e2a1162e | import os
import shutil
from .ZipFileManager import ZipFileManager
from .DiskFileManager import DiskFileManager
from .Directory import Directory
import string
printable = set(string.printable) - set("\x0b\x0c")
def is_hex(s):
    """Return True if ``s`` contains non-printable characters, i.e. it looks
    like raw (zip) binary data rather than a file path."""
    return any(c not in printable for c in s)
def file_tree(target, replace=False):
"""Open a connection to a file tree which can be either a disk folder, a
zip archive, or an in-memory zip archive.
Parameters
----------
target
Either the path to a target folder, or a zip file, or '@memory' to write
a zip file in memory (at which case a string of the zip file is returned)
If the target is already a flametree directory, it is returned as-is.
replace
If True, will remove the target if it already exists. If False, new files
will be written inside the target and some files may be overwritten.
"""
if isinstance(target, Directory):
return target
if (not isinstance(target, str)) or is_hex(target):
return Directory(file_manager=ZipFileManager(source=target))
elif target == "@memory":
return Directory("@memory", file_manager=ZipFileManager("@memory"))
elif target.lower().endswith(".zip"):
return Directory(target, file_manager=ZipFileManager(target, replace=replace))
else:
return Directory(target, file_manager=DiskFileManager(target))
| [] |
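For reference, a minimal sketch of the three kinds of targets `file_tree` accepts (the paths here are hypothetical):

mem_zip = file_tree("@memory")                   # zip archive built in memory
folder = file_tree("my_output_folder")           # plain folder on disk
archive = file_tree("backup.zip", replace=True)  # zip file, discarding any existing one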
artigianitecnologici/marrtino_apps | audio/audio_client.py | b58bf4daa1d06db2f1c8a47be02b29948d41f48d | import sys
import socket
import time
ip = '127.0.0.1'
port = 9001
if len(sys.argv) > 1:
    ip = sys.argv[1]
if len(sys.argv) > 2:
    port = int(sys.argv[2])
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
sock.send(b'bip\n\r')
data = sock.recv(80)
print(data)
sock.send('TTS[it-IT] ciao, come stai?\n\r'.encode())
data = sock.recv(80)
print(data)
sock.send(b'TTS[en-US] very well, thank you!\n\r')
data = sock.recv(80)
print(data)
sock.send(b'TTS default language is english!\n\r')
data = sock.recv(80)
print(data)
sock.send(b'bop\n\r')
data = sock.recv(80)
print(data)
time.sleep(1)
sock.close()
| [] |
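The script repeats the same send/receive handshake for every command; a small helper, assuming the same '\n\r'-terminated protocol used above, keeps that pattern in one place:

def send_cmd(sock, cmd, bufsize=80):
    """Send one command and return the server's decoded reply."""
    sock.send((cmd + '\n\r').encode())
    return sock.recv(bufsize).decode(errors='replace')

# e.g.: print(send_cmd(sock, 'TTS[en-US] hello there'))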
yulinfeng000/qmotor | qmotor/message/matcher.py | ad3e9eea291f5b87e09fcdd5e42f1eb13d752565 | from abc import ABC, abstractmethod
from typing import List
from .common import (
AtCell,
BasicMessage,
GroupMessage,
FriendMessage,
MsgCellType,
MessageType,
PlainCell,
)
from ..utils import is_str_blank, str_contains
class MsgMatcher(ABC):
def msg_chain_from_ctx(self, ctx):
return BasicMessage(ctx.msg).messageChain()
def get_cell_type(self, msg_cell):
return msg_cell.get("type", None)
@abstractmethod
def match(self, ctx) -> bool:
pass
class GroupMsg(MsgMatcher):
def match(self, ctx) -> bool:
return BasicMessage(ctx.msg).type() == MessageType.GroupMessage
class FriendMsg(MsgMatcher):
def match(self, ctx) -> bool:
return BasicMessage(ctx.msg).type() == MessageType.FriendMessage
class TempMsg(MsgMatcher):
def match(self, ctx) -> bool:
return BasicMessage(ctx.msg).type() == MessageType.TempMessage
class AtMsg(GroupMsg):
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
msg_chain = self.msg_chain_from_ctx(ctx)
return self.get_cell_type(msg_chain[1]) == MsgCellType.At
class AtMeMsg(AtMsg):
me_qq: int
def __init__(self, me_qq) -> None:
super(AtMeMsg, self).__init__()
self.me_qq = me_qq
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
msg_chain = GroupMessage(ctx.msg).messageChain()
at = AtCell(msg_chain[1])
return self.me_qq == at.target()
class JustAtMeMsg(AtMeMsg):
def __init__(self, me_qq) -> None:
super(JustAtMeMsg, self).__init__(me_qq)
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
msg_chain = self.msg_chain_from_ctx(ctx)
plain = PlainCell(msg_chain[2])
return is_str_blank(plain.text())
class AtMeCmdMsg(AtMeMsg):
cmd_list: List[str]
def __init__(self, me_qq, cmd) -> None:
super(AtMeCmdMsg, self).__init__(me_qq)
self.cmd_list = cmd
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
msg_chain = self.msg_chain_from_ctx(ctx)
return str_contains(PlainCell(msg_chain[2]).text(), self.cmd_list)
class SpecificFriendMsg(FriendMsg):
friend_qq: int
def __init__(self, friend_qq) -> None:
super(SpecificFriendMsg, self).__init__()
self.friend_qq = friend_qq
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
return self.friend_qq == FriendMessage(ctx.msg).friend_qq()
class SpecificGroupMsg(GroupMsg):
group_qq: int
def __init__(self, group_qq) -> None:
super(SpecificGroupMsg, self).__init__()
self.group_qq = group_qq
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
return self.group_qq == GroupMessage(ctx.msg).group_qq()
if __name__ == "__main__":
msg_matcher = JustAtMeMsg(123)
class Ctx:
def __init__(self, msg) -> None:
self.msg = msg
msg = {
"type": "GroupMessage",
"sender": {"id": 123, "nickname": "", "remark": ""},
"messageChain": [
{"type": "Source", "id": 123456, "time": 123456},
{"type": "At", "target": 1234, "display": "@Mirai"},
{"type": "Plain", "text": " "},
],
}
print(msg_matcher.match(Ctx(msg)))
| [] |
Atri10/Leet-code---Atri_Patel | invert-binary-tree/invert-binary-tree.py | 49fc59b9147a44ab04a66128fbb2ef259b5f7b7c | class Solution:
def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
if root:
            root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
return root
return None | [] |
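LeetCode injects `TreeNode` and the `Optional` import into the judge environment; to run the snippet locally, a minimal stand-in could look like this (it must appear above `Solution` in a single file, since the annotations are evaluated at class-definition time):

from typing import Optional

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

root = TreeNode(1, TreeNode(2), TreeNode(3))
inverted = Solution().invertTree(root)
print(inverted.left.val, inverted.right.val)  # 3 2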
sinahmr/childf | main/admin.py | 4e01f46867425b36b6431713b79debf585d69d37 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from django.contrib.auth.models import Group
from django.utils.translation import ugettext_lazy as _
from main.models import UserInfo, User, Child, Volunteer, Donor, Letter, Need, PurchaseForInstitute, PurchaseForNeed, \
Activity, OngoingUserInfo
@admin.register(User)
class UserAdmin(DjangoUserAdmin):
class UserInfoInline(admin.TabularInline):
model = UserInfo
extra = 1
max_num = 1
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
list_display = ('email', 'userinfo', 'is_staff')
search_fields = ('email', 'userinfo__first_name', 'userinfo__last_name')
ordering = ('email',)
inlines = [UserInfoInline]
admin.site.unregister(Group)
admin.site.register(Child)
admin.site.register(Volunteer)
admin.site.register(Donor)
admin.site.register(Letter)
admin.site.register(Need)
admin.site.register(PurchaseForInstitute)
admin.site.register(PurchaseForNeed)
admin.site.register(Activity)
admin.site.register(OngoingUserInfo)
| [((10, 1, 10, 21), 'django.contrib.admin.register', 'admin.register', ({(10, 16, 10, 20): 'User'}, {}), '(User)', False, 'from django.contrib import admin\n'), ((34, 0, 34, 28), 'django.contrib.admin.site.unregister', 'admin.site.unregister', ({(34, 22, 34, 27): 'Group'}, {}), '(Group)', False, 'from django.contrib import admin\n'), ((35, 0, 35, 26), 'django.contrib.admin.site.register', 'admin.site.register', ({(35, 20, 35, 25): 'Child'}, {}), '(Child)', False, 'from django.contrib import admin\n'), ((36, 0, 36, 30), 'django.contrib.admin.site.register', 'admin.site.register', ({(36, 20, 36, 29): 'Volunteer'}, {}), '(Volunteer)', False, 'from django.contrib import admin\n'), ((37, 0, 37, 26), 'django.contrib.admin.site.register', 'admin.site.register', ({(37, 20, 37, 25): 'Donor'}, {}), '(Donor)', False, 'from django.contrib import admin\n'), ((38, 0, 38, 27), 'django.contrib.admin.site.register', 'admin.site.register', ({(38, 20, 38, 26): 'Letter'}, {}), '(Letter)', False, 'from django.contrib import admin\n'), ((39, 0, 39, 25), 'django.contrib.admin.site.register', 'admin.site.register', ({(39, 20, 39, 24): 'Need'}, {}), '(Need)', False, 'from django.contrib import admin\n'), ((40, 0, 40, 41), 'django.contrib.admin.site.register', 'admin.site.register', ({(40, 20, 40, 40): 'PurchaseForInstitute'}, {}), '(PurchaseForInstitute)', False, 'from django.contrib import admin\n'), ((41, 0, 41, 36), 'django.contrib.admin.site.register', 'admin.site.register', ({(41, 20, 41, 35): 'PurchaseForNeed'}, {}), '(PurchaseForNeed)', False, 'from django.contrib import admin\n'), ((42, 0, 42, 29), 'django.contrib.admin.site.register', 'admin.site.register', ({(42, 20, 42, 28): 'Activity'}, {}), '(Activity)', False, 'from django.contrib import admin\n'), ((43, 0, 43, 36), 'django.contrib.admin.site.register', 'admin.site.register', ({(43, 20, 43, 35): 'OngoingUserInfo'}, {}), '(OngoingUserInfo)', False, 'from django.contrib import admin\n'), ((19, 9, 19, 25), 'django.utils.translation.ugettext_lazy', '_', ({(19, 11, 19, 24): '"""Permissions"""'}, {}), "('Permissions')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((20, 9, 20, 29), 'django.utils.translation.ugettext_lazy', '_', ({(20, 11, 20, 28): '"""Important dates"""'}, {}), "('Important dates')", True, 'from django.utils.translation import ugettext_lazy as _\n')] |
MoyTW/RL_Arena_Experiment | hunting/display/render.py | fb79c67576cd4de3e4a58278b4515098f38fb584 | import tdl
import time
import hunting.constants as c
class Renderer:
def __init__(self, main_console=None, level_display_width=c.SCREEN_WIDTH, level_display_height=c.SCREEN_HEIGHT):
if main_console is None:
self.main_console = tdl.init(level_display_width, level_display_height, 'From Renderer Default Constructor')
else:
self.main_console = main_console
self.level_display_width = level_display_width
self.level_display_height = level_display_height
self._level_console = tdl.Console(level_display_width, level_display_height)
def _render_level(self, con, level):
for x in range(level.width):
for y in range(level.height):
if level[x][y].blocks is not False:
self._level_console.draw_rect(x, y, 1, 1, None, bg=[120, 0, 50])
else:
self._level_console.draw_rect(x, y, 1, 1, None, bg=[30, 255, 30])
# TODO: This is pretty hacky!
i = 1
for o in level._all_objects:
if o.faction == '1': # TODO: Better faction implementation!
color = [255, 0, 0]
else:
color = [0, 0, 255]
self._level_console.draw_char(o.x, o.y, i, color)
i += 1
con.blit(self._level_console)
def render_all(self, level):
self._render_level(self.main_console, level)
tdl.flush()
def clear(self, level):
for o in level._all_objects:
self._level_console.draw_char(o.x, o.y, ' ')
def render_event(self, level, event):
if event[c.EVENT_TYPE] == c.MOVEMENT_EVENT:
# Clear previous location
self._level_console.draw_char(event[c.MOVEMENT_PREV_X], event[c.MOVEMENT_PREV_Y], ' ', bg=[0, 15, 7])
# Retrieve faction and color
o = level.get_object_by_id(event[c.OBJ_ID])
if o.faction == '1': # TODO: Better faction implementation!
color = [255, 0, 0]
else:
color = [0, 0, 255]
self._level_console.draw_char(event[c.OBJ_X], event[c.OBJ_Y], o.faction, fg=color)
elif event[c.EVENT_TYPE] == c.OBJECT_DESTRUCTION_EVENT:
self._level_console.draw_char(event[c.OBJ_X], event[c.OBJ_Y], ' ', bg=[0, 15, 7])
# Render
self.main_console.blit(self._level_console)
tdl.flush()
def visualize(level, show_time=1):
Renderer().render_all(level)
time.sleep(show_time) | [((68, 4, 68, 25), 'time.sleep', 'time.sleep', ({(68, 15, 68, 24): 'show_time'}, {}), '(show_time)', False, 'import time\n'), ((14, 30, 14, 84), 'tdl.Console', 'tdl.Console', ({(14, 42, 14, 61): 'level_display_width', (14, 63, 14, 83): 'level_display_height'}, {}), '(level_display_width, level_display_height)', False, 'import tdl\n'), ((38, 8, 38, 19), 'tdl.flush', 'tdl.flush', ({}, {}), '()', False, 'import tdl\n'), ((63, 8, 63, 19), 'tdl.flush', 'tdl.flush', ({}, {}), '()', False, 'import tdl\n'), ((9, 32, 9, 120), 'tdl.init', 'tdl.init', ({(9, 41, 9, 60): 'level_display_width', (9, 62, 9, 82): 'level_display_height', (9, 84, 9, 119): '"""From Renderer Default Constructor"""'}, {}), "(level_display_width, level_display_height,\n 'From Renderer Default Constructor')", False, 'import tdl\n')] |
neosergio/hackatrix-api | ideas/models.py | 27f0180415efa97bd7345d100b314d8807486b67 | from django.db import models
class Idea(models.Model):
title = models.CharField(max_length=255, unique=True)
description = models.TextField()
author = models.OneToOneField('events.Registrant',
related_name='author_idea',
on_delete=models.CASCADE,
blank=True,
null=True)
written_by = models.ForeignKey('users.User',
related_name='written_idea',
on_delete=models.CASCADE,
blank=True,
null=True)
event = models.ForeignKey('events.Event',
related_name='event_idea',
on_delete=models.CASCADE,
blank=True,
null=True)
is_valid = models.BooleanField(default=False)
max_number_of_participants = models.PositiveIntegerField(default=7)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(default=True)
    class Meta:
ordering = ['-created_at', '-id']
def __str__(self):
return self.title
class IdeaTeamMember(models.Model):
idea = models.ForeignKey(Idea, related_name='idea_team_member', on_delete=models.CASCADE)
member = models.OneToOneField('events.Registrant', related_name='member_idea', on_delete=models.CASCADE)
    class Meta:
ordering = ['idea']
unique_together = ('idea', 'member')
verbose_name = 'Team Member'
verbose_name_plural = 'Groups'
| [((5, 12, 5, 57), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((6, 18, 6, 36), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import models\n'), ((7, 13, 11, 44), 'django.db.models.OneToOneField', 'models.OneToOneField', (), '', False, 'from django.db import models\n'), ((12, 17, 16, 45), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((17, 12, 21, 40), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((22, 15, 22, 49), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((23, 33, 23, 71), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import models\n'), ((24, 17, 24, 56), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((25, 18, 25, 53), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((26, 16, 26, 49), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((36, 11, 36, 93), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((37, 13, 37, 108), 'django.db.models.OneToOneField', 'models.OneToOneField', (), '', False, 'from django.db import models\n')] |
ssheikh85/AIHCND_c3_3d_imaging | section2/out/src/data_prep/SlicesDataset.py | 6502985d4199244328a683459b4d819090d58f3c | """
Module for Pytorch dataset representations
"""
import torch
from torch.utils.data import Dataset
class SlicesDataset(Dataset):
"""
This class represents an indexable Torch dataset
which could be consumed by the PyTorch DataLoader class
"""
def __init__(self, data):
self.data = data
self.slices = []
for i, d in enumerate(data):
for j in range(d["image"].shape[0]):
self.slices.append((i, j))
def __getitem__(self, idx):
"""
This method is called by PyTorch DataLoader class to return a sample with id idx
Arguments:
idx {int} -- id of sample
Returns:
Dictionary of 2 Torch Tensors of dimensions [1, W, H]
"""
slc = self.slices[idx]
sample = dict()
sample["id"] = idx
# You could implement caching strategy here if dataset is too large to fit
# in memory entirely
# Also this would be the place to call transforms if data augmentation is used
# TASK: Create two new keys in the "sample" dictionary, named "image" and "seg"
# The values are 3D Torch Tensors with image and label data respectively.
# First dimension is size 1, and last two hold the voxel data from the respective
# slices. Write code that stores the 2D slice data in the last 2 dimensions of the 3D Tensors.
# Your tensor needs to be of shape [1, patch_size, patch_size]
# Don't forget that you need to put a Torch Tensor into your dictionary element's value
# Hint: your 3D data sits in self.data variable, the id of the 3D volume from data array
# and the slice number are in the slc variable.
# Hint2: You can use None notation like so: arr[None, :] to add size-1
# dimension to a Numpy array
# <YOUR CODE GOES HERE>
img = self.data[slc[0]]["image"][slc[1]]
sample['image'] = torch.from_numpy(img[None,:])
seg = self.data[slc[0]]["seg"][slc[1]]
sample['seg'] = torch.from_numpy(seg[None,:])
return sample
def __len__(self):
"""
This method is called by PyTorch DataLoader class to return number of samples in the dataset
Returns:
int
"""
return len(self.slices)
| [((53, 26, 53, 55), 'torch.from_numpy', 'torch.from_numpy', ({(53, 43, 53, 54): 'img[(None), :]'}, {}), '(img[(None), :])', False, 'import torch\n'), ((56, 24, 56, 53), 'torch.from_numpy', 'torch.from_numpy', ({(56, 41, 56, 52): 'seg[(None), :]'}, {}), '(seg[(None), :])', False, 'import torch\n')] |
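A minimal sketch of consuming the dataset with a DataLoader (the volume below is synthetic; real volumes come from the loading pipeline):

import numpy as np
from torch.utils.data import DataLoader

data = [{"image": np.zeros((4, 64, 64), dtype=np.float32),
         "seg": np.zeros((4, 64, 64), dtype=np.int64)}]  # one fake 4-slice volume
loader = DataLoader(SlicesDataset(data), batch_size=2, shuffle=True)
for batch in loader:
    print(batch["image"].shape)  # torch.Size([2, 1, 64, 64])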
uktrade/zenslackchat | zenslackchat/zendesk_webhooks.py | 8071757e1ea20a433783c6a7c47f25b046692682 | from zenslackchat.zendesk_base_webhook import BaseWebHook
from zenslackchat.zendesk_email_to_slack import email_from_zendesk
from zenslackchat.zendesk_comments_to_slack import comments_from_zendesk
class CommentsWebHook(BaseWebHook):
"""Handle Zendesk Comment Events.
"""
def handle_event(self, event, slack_client, zendesk_client):
"""Handle the comment trigger event we have been POSTed.
Recover and update the comments with lastest from Zendesk.
"""
comments_from_zendesk(event, slack_client, zendesk_client)
class EmailWebHook(BaseWebHook):
"""Handle Zendesk Email Events.
"""
def handle_event(self, event, slack_client, zendesk_client):
"""Handle an email created issue and create it on slack.
"""
email_from_zendesk(event, slack_client, zendesk_client)
| [((15, 8, 15, 66), 'zenslackchat.zendesk_comments_to_slack.comments_from_zendesk', 'comments_from_zendesk', ({(15, 30, 15, 35): 'event', (15, 37, 15, 49): 'slack_client', (15, 51, 15, 65): 'zendesk_client'}, {}), '(event, slack_client, zendesk_client)', False, 'from zenslackchat.zendesk_comments_to_slack import comments_from_zendesk\n'), ((24, 8, 24, 63), 'zenslackchat.zendesk_email_to_slack.email_from_zendesk', 'email_from_zendesk', ({(24, 27, 24, 32): 'event', (24, 34, 24, 46): 'slack_client', (24, 48, 24, 62): 'zendesk_client'}, {}), '(event, slack_client, zendesk_client)', False, 'from zenslackchat.zendesk_email_to_slack import email_from_zendesk\n')] |
groupdocs-merger-cloud/groupdocs-merger-cloud-python-samples | Examples/PagesOperations/MovePage.py | af736c94240eeefef28bd81012c96ab2ea779088 | # Import modules
import groupdocs_merger_cloud
from Common import Common
# This example demonstrates how to move document page to a new position
class MovePage:
@classmethod
def Run(cls):
pagesApi = groupdocs_merger_cloud.PagesApi.from_config(Common.GetConfig())
options = groupdocs_merger_cloud.MoveOptions()
options.file_info = groupdocs_merger_cloud.FileInfo("WordProcessing/four-pages.docx")
options.output_path = "Output/move-pages.docx"
options.page_number = 1
options.new_page_number = 2
result = pagesApi.move(groupdocs_merger_cloud.MoveRequest(options))
print("Output file path = " + result.path) | [((11, 18, 11, 54), 'groupdocs_merger_cloud.MoveOptions', 'groupdocs_merger_cloud.MoveOptions', ({}, {}), '()', False, 'import groupdocs_merger_cloud\n'), ((12, 28, 12, 93), 'groupdocs_merger_cloud.FileInfo', 'groupdocs_merger_cloud.FileInfo', ({(12, 60, 12, 92): '"""WordProcessing/four-pages.docx"""'}, {}), "('WordProcessing/four-pages.docx')", False, 'import groupdocs_merger_cloud\n'), ((9, 63, 9, 81), 'Common.Common.GetConfig', 'Common.GetConfig', ({}, {}), '()', False, 'from Common import Common\n'), ((17, 31, 17, 74), 'groupdocs_merger_cloud.MoveRequest', 'groupdocs_merger_cloud.MoveRequest', ({(17, 66, 17, 73): 'options'}, {}), '(options)', False, 'import groupdocs_merger_cloud\n')] |
joseluistello/Regression-Analysis-Apple-Data | src/models/predict_model.py | 85952edd22ba8c382f43357efc510763185fd6d1 | # `ml`, `x_test` and `y_test` are assumed to be defined upstream by the training pipeline.
import pandas as pd

y_pred = ml.predict(x_test)
print(y_pred)

from sklearn.metrics import r2_score
r2_score(y_test, y_pred)

pred_y_df = pd.DataFrame({'Actual Value': y_test, 'Predicted Value': y_pred, 'Difference': y_test - y_pred})
pred_y_df[0:20] | [((5, 0, 5, 23), 'sklearn.metrics.r2_score', ({(5, 9, 5, 15): 'y_test', (5, 16, 5, 22): 'y_pred'}, {}), '(y_test, y_pred)', False, 'from sklearn.metrics import r2_score\n')] |
Soufiane-Fartit/cars-prices | src/models/utils_func.py | 8eee8aa168251adab7f4947c45a78752e4145041 | # -*- coding: utf-8 -*-
""" This module offers util functions to be called and used
in other modules
"""
from datetime import datetime
import os
import json
import pickle
import string
import random
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn import tree
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
"""GENERATE A RANDOM STRING TO BE USED AS AN ID
Args:
size (int, optional): size of the string. Defaults to 6.
        chars (str, optional): characters to be used to generate the string.
Defaults to string.ascii_lowercase+string.digits.
Returns:
        [str]: a random chain of characters
"""
return "".join(random.choice(chars) for _ in range(size))
def save_model(path, model):
"""SAVE MODEL INTO PICKLE FILE
Args:
path (str): path where to save the model
model (binary): the model to be saved
"""
with open(path, "wb") as file:
pickle.dump(model, file)
def update_history(models_hist_path, model_id, model_name, model, params):
"""SAVE METADATA RELATED TO THE TRAINED MODEL INTO THE HISTORY FILE
Args:
models_hist_path (str): path to the history file
model_id (str): unique id of the model
model_name (str): model name = "model_"+model_id+".pkl"
model (binary): binary file of the model
params (dict): dictionnary containing the hyper-parameters
used to fit the model
"""
model_metadata = dict()
model_metadata["trained"] = str(datetime.now())
model_metadata["model_type"] = type(model).__name__
model_metadata["model_id"] = model_id
model_metadata["params"] = params
print(model_metadata)
with open(models_hist_path, "r+") as outfile:
try:
hist = json.load(outfile)
hist[model_name] = model_metadata
outfile.seek(0)
json.dump(hist, outfile, indent=4)
except json.decoder.JSONDecodeError:
json.dump({model_name: model_metadata}, outfile, indent=4)
def update_history_add_eval(
models_hist_path, model_id=None, model_name=None, metrics=None
):
"""ADD EVALUATION METRICS THE HISTORY FILE FOR THE SPECIFIED MODEL
Args:
models_hist_path (str): path to the history file
model_id (str, optional): the id of the model. Defaults to None.
model_name (str, optional): the name of the model. Defaults to None.
metrics (dict, optional): a dictionnary containing metadata related
to the model evaluation. Defaults to None.
"""
assert (
model_id is not None or model_name is not None
), "At least the model id or name must be given"
assert models_hist_path is not None, "You must specify the path to the history file"
if not model_name:
model_name = "model_" + model_id + ".pkl"
eval_metadata = dict()
eval_metadata["datetime"] = str(datetime.now())
eval_metadata["metrics"] = metrics
with open(models_hist_path, "r+") as outfile:
try:
hist = json.load(outfile)
hist[model_name]["evaluation"] = eval_metadata
outfile.seek(0)
json.dump(hist, outfile, indent=4)
except json.decoder.JSONDecodeError:
print("cannot save evaluation metadata")
def generate_features_importance_plot(model, features, model_id):
"""GENERATES A PLOT DESCRIBING FEATURES IMPORTANCE FOR THE MODEL
TO MAKE THE PREDICTION.
Args:
model (tree-based model): a tree based model (decision tree, random forest ...)
features (pandas dataframe): a table of the features on which we trained the model
model_id (str): the unique id of the model
"""
mean_importances = model.feature_importances_
importances_indices = np.argsort(mean_importances)[::-1]
ordered_columns = [features.columns[i] for i in importances_indices]
importances = pd.DataFrame(
        [est.feature_importances_ for est in model.estimators_],  # `est`, not `tree`, to avoid shadowing sklearn's tree module
columns=features.columns,
)
importances = importances[ordered_columns]
_, ax = plt.subplots(figsize=(12, 8))
sns.boxplot(x="variable", y="value", ax=ax, data=pd.melt(importances))
figure = ax.get_figure()
figure.savefig(
"models/models-training/run_" + model_id + "/features_importance.png"
)
def plot_trees(rf, feature_names, target_names, model_id):
"""GENERATES A PLOT THAT SHOWS THE DECISION MAKING OF THE TREES
Args:
rf (model): a tree based model (random forest ...)
feature_names (list): names of the columns of the training set
target_names (str): name of the target columns
model_id (str): unique id of the model
"""
fn = feature_names
cn = target_names
fig, axes = plt.subplots(nrows=1, ncols=5, figsize=(10, 2), dpi=900)
for index in range(0, 5):
tree.plot_tree(
rf.estimators_[index],
feature_names=fn,
class_names=cn,
filled=True,
ax=axes[index],
)
axes[index].set_title("Estimator: " + str(index), fontsize=11)
fig.savefig("models/models-training/run_" + model_id + "/Trees.png")
def get_id_list(N=6):
    print(os.getcwd())
print([x[0] for x in os.walk("../../models/models-training")])
return [x[0][-N:] for x in os.walk("../../models/models-training")][1:] | [((122, 18, 125, 5), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((127, 12, 127, 41), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'from matplotlib import pyplot as plt\n'), ((146, 16, 146, 72), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'from matplotlib import pyplot as plt\n'), ((43, 8, 43, 32), 'pickle.dump', 'pickle.dump', ({(43, 20, 43, 25): 'model', (43, 27, 43, 31): 'file'}, {}), '(model, file)', False, 'import pickle\n'), ((59, 36, 59, 50), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((97, 36, 97, 50), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((120, 26, 120, 54), 'numpy.argsort', 'np.argsort', ({(120, 37, 120, 53): 'mean_importances'}, {}), '(mean_importances)', True, 'import numpy as np\n'), ((148, 8, 154, 9), 'sklearn.tree.plot_tree', 'tree.plot_tree', (), '', False, 'from sklearn import tree\n'), ((161, 11, 161, 22), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((31, 19, 31, 39), 'random.choice', 'random.choice', ({(31, 33, 31, 38): 'chars'}, {}), '(chars)', False, 'import random\n'), ((67, 19, 67, 37), 'json.load', 'json.load', ({(67, 29, 67, 36): 'outfile'}, {}), '(outfile)', False, 'import json\n'), ((70, 12, 70, 46), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((102, 19, 102, 37), 'json.load', 'json.load', ({(102, 29, 102, 36): 'outfile'}, {}), '(outfile)', False, 'import json\n'), ((105, 12, 105, 46), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((128, 53, 128, 73), 'pandas.melt', 'pd.melt', ({(128, 61, 128, 72): 'importances'}, {}), '(importances)', True, 'import pandas as pd\n'), ((72, 12, 72, 70), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((162, 25, 162, 64), 'os.walk', 'os.walk', ({(162, 33, 162, 63): '"""../../models/models-training"""'}, {}), "('../../models/models-training')", False, 'import os\n'), ((163, 31, 163, 70), 'os.walk', 'os.walk', ({(163, 39, 163, 69): '"""../../models/models-training"""'}, {}), "('../../models/models-training')", False, 'import os\n')] |
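A hypothetical end-to-end sketch of the helpers above (the paths and the model are placeholders; note that `update_history` opens the history file in "r+" mode, so it has to exist already):

from sklearn.ensemble import RandomForestRegressor

model = RandomForestRegressor(n_estimators=10)
model_id = id_generator()
model_name = "model_" + model_id + ".pkl"
save_model("models/" + model_name, model)
update_history("models/history.json", model_id, model_name, model,
               params={"n_estimators": 10})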
KpaBap/palbot | modules/finance.py | 38d2b7958e310f45a28cf1b3173967b92f819946 | import asyncio
import discord
from discord.ext import commands
import re
import sqlite3
from urllib.parse import quote as uriquote
import html
CURR = ["AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR",
"GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN",
"MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD",
"THB", "TRY", "TWD", "ZAR"]
class Finance(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def coin(self, ctx, *, line: str):
"""Look up a cryptocurrency such as Bitcoin
Optionally specify a quantity such as `0.6 ETH`
Optionally specify a conversion value such as `2 BTC in ETH` or `ETH in CAD`"""
coin = await self.parse_coinline(line)
if not coin:
await ctx.send(f"Unable to find coin {line}")
return
url = f"https://api.coinmarketcap.com/v1/ticker/{coin['coin']}{coin['currency']}"
async with self.bot.session.get(url) as resp:
data = await resp.json()
data = data[0]
cid = data['symbol'].upper()
name = data['name']
pUSD = data['price_usd']
pC24 = data['percent_change_24h']
pC1 = data['percent_change_1h']
output = ""
if coin.get('cvtto', ''):
cvtval = await self.convert_coin(coin, data)
if not cvtval:
await ctx.send(f"Failed to look up {coin['cvtto']}")
return
if coin['qty'] == 1:
output = "{} {} | Value: {} {} (${} USD) | 1-hour change: {}% | 24-hour change: {}%".format(cid, name, cvtval, coin['cvtto'].upper(), pUSD, pC1, pC24)
else:
usdfinal = float(pUSD) * coin['qty']
output = "{} {} : {} {} (${:.2f} USD)".format(coin['qty'], cid, cvtval, coin['cvtto'].upper(), usdfinal)
else:
if coin['qty'] == 1:
output = "{} {} | Value: ${} | 1-hour change: {}% | 24-hour change: {}%".format(cid, name, pUSD, pC1, pC24)
else:
finalprice = float(pUSD) * coin['qty']
output = "{} {} : ${:.2f}".format(coin['qty'], cid, finalprice)
if output:
await ctx.send(output)
async def convert_coin(self, coin, data):
if coin['currency']:
cvtval = "{:.2f}".format(float(data['price_{}'.format(coin['cvtto'].lower())]) * coin['qty'])
else:
            if not coin['cvtto']:
                cvtval = ''
            elif coin['cvtto'] == "bitcoin":
#api gives us BTC by default
cvtval = self.ffstr(float(data['price_btc']) * coin['qty'])
coin['cvtto'] = "BTC"
else:
pUSD = data['price_usd']
url = "https://api.coinmarketcap.com/v1/ticker/{}".format(coin['cvtto'])
async with self.bot.session.get(url) as resp:
tojson = await resp.json()
coin['cvtto'] = tojson[0]['symbol'].upper()
toval = float(tojson[0]['price_usd'])
cvtval = self.ffstr((float(pUSD) * coin['qty']) / toval)
return cvtval
def ffstr(self, number):
return "{:.8f}".format(float(number)).rstrip('0').rstrip('.')
async def parse_coinline(self, line):
coinqty = 1
qtycheck = re.search(r"(^(\d*\.)?\d+)\s?(\w.+)", line)
if qtycheck:
coinqty = float(qtycheck.group(1))
line = qtycheck.group(3).strip()
curr = ""
cvtto = ""
if " in " in line or " to " in line:
if " in " in line:
coin, cvtto = line.split(" in ")
elif " to " in line:
coin, cvtto = line.split(" to ")
coinid = await self.findcoin(coin)
if cvtto.upper() in CURR:
curr = "?convert={}".format(cvtto)
else:
cvtto = await self.findcoin(cvtto)
else:
coin = line
coinid = await self.findcoin(coin)
if not coinid:
return None
return {'coin': coinid,
'qty': coinqty,
'currency': curr,
'cvtto': cvtto}
async def findcoin(self, coin):
conn = sqlite3.connect("coins.sqlite3")
cursor = conn.cursor()
result = cursor.execute("SELECT coinid FROM coins WHERE coinid = (?) OR symbol = (?)", (coin, coin)).fetchone()
if not result:
like = "%{}%".format(coin)
result = cursor.execute("SELECT coinid FROM coins WHERE name LIKE (?)", [like]).fetchone()
if result:
return result[0]
@commands.command(hidden=True)
@commands.is_owner()
async def newcoins(self, ctx):
conn = sqlite3.connect("coins.sqlite3")
cursor = conn.cursor()
result = cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='coins';").fetchone()
if not result:
cursor.execute("CREATE TABLE 'coins' ('symbol' TEXT, 'coinid' TEXT UNIQUE ON CONFLICT REPLACE, 'name' TEXT);")
conn.commit()
url = "https://api.coinmarketcap.com/v1/ticker/?limit=0"
async with self.bot.session.get(url) as resp:
data = await resp.json()
for coin in data:
sym = coin['symbol'].lower()
cid = coin['id'].lower()
name = coin['name'].lower()
cursor.execute("insert into coins values (?, ?, ?)", (sym,cid,name))
conn.commit()
conn.close()
@commands.command(aliases=['stonks', 'stocks'])
async def stock (self, ctx, name: str):
"""Look up a stock and show its current price, change, etc"""
symbol = ""
url = f"https://autoc.finance.yahoo.com/autoc?query={uriquote(name)}®ion=1&lang=en&guccounter=1"
async with self.bot.session.get(url) as resp:
data = await resp.json()
symbol = data['ResultSet']['Result'][0]['symbol']
if not symbol:
await ctx.send(f"Unable to find a stonk named `{name}`")
return
url = f"http://query1.finance.yahoo.com/v7/finance/quote?symbols={symbol}"
async with self.bot.session.get(url) as resp:
data = await resp.json()
data = data["quoteResponse"]["result"][0]
downup = "\N{CHART WITH UPWARDS TREND}" if data['regularMarketChange'] > 0 else "\N{CHART WITH DOWNWARDS TREND}"
outstr = "{}{}: {} {} :: Today's change: {:.2f} ({:.2f}%) {}"
longn = ' ({})'.format(data['shortName']) if 'shortName' in data else ''
outstr = outstr.format(data['symbol'], longn, data['regularMarketPrice'], data['currency'],
float(data['regularMarketChange']), float(data['regularMarketChangePercent']),
downup)
if 'postMarketPrice' in data and (data['marketState'] == "CLOSED" or "POST" in data['marketState']):
pdu = "\N{CHART WITH UPWARDS TREND}" if data['postMarketChange'] > 0 else "\N{CHART WITH DOWNWARDS TREND}"
outstr += " :: After Hours: {:.2f} - Change: {:.2f} {}".format(data['postMarketPrice'],
data['postMarketChange'], pdu)
await ctx.send(html.unescape(outstr))
def setup(bot):
bot.add_cog(Finance(bot))
| [((21, 5, 21, 23), 'discord.ext.commands.command', 'commands.command', ({}, {}), '()', False, 'from discord.ext import commands\n'), ((129, 5, 129, 34), 'discord.ext.commands.command', 'commands.command', (), '', False, 'from discord.ext import commands\n'), ((130, 5, 130, 24), 'discord.ext.commands.is_owner', 'commands.is_owner', ({}, {}), '()', False, 'from discord.ext import commands\n'), ((151, 5, 151, 51), 'discord.ext.commands.command', 'commands.command', (), '', False, 'from discord.ext import commands\n'), ((90, 19, 90, 62), 're.search', 're.search', ({(90, 29, 90, 55): '"""(^(\\\\d*\\\\.)?\\\\d+)\\\\s?(\\\\w.+)"""', (90, 57, 90, 61): 'line'}, {}), "('(^(\\\\d*\\\\.)?\\\\d+)\\\\s?(\\\\w.+)', line)", False, 'import re\n'), ((120, 15, 120, 47), 'sqlite3.connect', 'sqlite3.connect', ({(120, 31, 120, 46): '"""coins.sqlite3"""'}, {}), "('coins.sqlite3')", False, 'import sqlite3\n'), ((132, 15, 132, 47), 'sqlite3.connect', 'sqlite3.connect', ({(132, 31, 132, 46): '"""coins.sqlite3"""'}, {}), "('coins.sqlite3')", False, 'import sqlite3\n'), ((155, 61, 155, 75), 'urllib.parse.quote', 'uriquote', ({(155, 70, 155, 74): 'name'}, {}), '(name)', True, 'from urllib.parse import quote as uriquote\n'), ((180, 23, 180, 44), 'html.unescape', 'html.unescape', ({(180, 37, 180, 43): 'outstr'}, {}), '(outstr)', False, 'import html\n')] |
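For intuition, a few example inputs and the dictionaries `parse_coinline` would produce for them (assuming the local coins.sqlite3 lookup resolves the usual CoinMarketCap ids):

# "0.5 btc in CAD" -> {'coin': 'bitcoin',  'qty': 0.5, 'currency': '?convert=CAD', 'cvtto': 'CAD'}
# "eth to btc"     -> {'coin': 'ethereum', 'qty': 1,   'currency': '',             'cvtto': 'bitcoin'}
# "doge"           -> {'coin': 'dogecoin', 'qty': 1,   'currency': '',             'cvtto': ''}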
shubha1593/MovieReviewAnalysis | SG_GetDataForClassifier.py | c485eea0c8b35e554027cce7a431212b406e672c | from SG_GetFeatureMatrix import *
from SG_VectorY import *
featureMatrix = featureMatrixFromReviews()
Y = getYVector()
def getDataForClassifier():
return featureMatrix, Y | [] |
Carnales/green-bounty | greenbounty/bounties/migrations/0001_initial.py | beb765082b32c096139463bf75ccc1ec3d530692 | # Generated by Django 3.1.4 on 2021-01-17 19:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25, null=True)),
('balance', models.FloatField()),
('total', models.FloatField()),
],
),
migrations.CreateModel(
name='Hunter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='')),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Bounty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='')),
('price', models.FloatField()),
('city', models.CharField(max_length=25, null=True)),
('hunter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='bounties.hunter')),
],
),
]
| [((13, 8, 13, 65), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', ({(13, 40, 13, 64): 'settings.AUTH_USER_MODEL'}, {}), '(settings.AUTH_USER_MODEL)', False, 'from django.db import migrations, models\n'), ((20, 23, 20, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((21, 25, 21, 67), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((22, 28, 22, 47), 'django.db.models.FloatField', 'models.FloatField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((23, 26, 23, 45), 'django.db.models.FloatField', 'models.FloatField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((29, 23, 29, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((30, 25, 30, 67), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((31, 26, 31, 80), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((32, 25, 32, 142), 'django.db.models.OneToOneField', 'models.OneToOneField', (), '', False, 'from django.db import migrations, models\n'), ((38, 23, 38, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((39, 25, 39, 67), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((40, 26, 40, 80), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((41, 26, 41, 45), 'django.db.models.FloatField', 'models.FloatField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((42, 25, 42, 67), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((43, 27, 43, 135), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
osrg/nova | nova/tests/virt/docker/test_driver.py | 14b6bc655145c832bd9c822e48f877818e0e53ff | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2013 dotCloud, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import socket
import mock
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import units
from nova import test
from nova.tests import utils
import nova.tests.virt.docker.mock_client
from nova.tests.virt.test_virt_drivers import _VirtDriverTestCase
from nova.virt.docker import hostinfo
from nova.virt.docker import network
class DockerDriverTestCase(_VirtDriverTestCase, test.TestCase):
driver_module = 'nova.virt.docker.DockerDriver'
def setUp(self):
super(DockerDriverTestCase, self).setUp()
self.stubs.Set(nova.virt.docker.driver.DockerDriver,
'docker',
nova.tests.virt.docker.mock_client.MockClient())
def fake_setup_network(self, instance, network_info):
return
self.stubs.Set(nova.virt.docker.driver.DockerDriver,
'_setup_network',
fake_setup_network)
def fake_get_registry_port(self):
return 5042
self.stubs.Set(nova.virt.docker.driver.DockerDriver,
'_get_registry_port',
fake_get_registry_port)
# Note: using mock.object.path on class throws
# errors in test_virt_drivers
def fake_teardown_network(container_id):
return
self.stubs.Set(network, 'teardown_network', fake_teardown_network)
self.context = context.RequestContext('fake_user', 'fake_project')
def test_driver_capabilities(self):
self.assertFalse(self.connection.capabilities['has_imagecache'])
self.assertFalse(self.connection.capabilities['supports_recreate'])
#NOTE(bcwaldon): This exists only because _get_running_instance on the
# base class will not let us set a custom disk/container_format.
def _get_running_instance(self, obj=False):
instance_ref = utils.get_test_instance(obj=obj)
network_info = utils.get_test_network_info()
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
image_info = utils.get_test_image_info(None, instance_ref)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
self.connection.spawn(self.ctxt, jsonutils.to_primitive(instance_ref),
image_info, [], 'herp', network_info=network_info)
return instance_ref, network_info
def test_get_host_stats(self):
self.mox.StubOutWithMock(socket, 'gethostname')
socket.gethostname().AndReturn('foo')
socket.gethostname().AndReturn('bar')
self.mox.ReplayAll()
self.assertEqual('foo',
self.connection.get_host_stats()['host_hostname'])
self.assertEqual('foo',
self.connection.get_host_stats()['host_hostname'])
def test_get_available_resource(self):
memory = {
'total': 4 * units.Mi,
'free': 3 * units.Mi,
'used': 1 * units.Mi
}
disk = {
'total': 50 * units.Gi,
'available': 25 * units.Gi,
'used': 25 * units.Gi
}
# create the mocks
with contextlib.nested(
mock.patch.object(hostinfo, 'get_memory_usage',
return_value=memory),
mock.patch.object(hostinfo, 'get_disk_usage',
return_value=disk)
) as (
get_memory_usage,
get_disk_usage
):
# run the code
stats = self.connection.get_available_resource(nodename='test')
# make our assertions
get_memory_usage.assert_called_once_with()
get_disk_usage.assert_called_once_with()
expected_stats = {
'vcpus': 1,
'vcpus_used': 0,
'memory_mb': 4,
'memory_mb_used': 1,
            'local_gb': 50,
            'local_gb_used': 25,
            'disk_available_least': 25,
'hypervisor_type': 'docker',
'hypervisor_version': 1000,
'hypervisor_hostname': 'test',
'cpu_info': '?',
'supported_instances': ('[["i686", "docker", "lxc"],'
' ["x86_64", "docker", "lxc"]]')
}
self.assertEqual(expected_stats, stats)
def test_plug_vifs(self):
# Check to make sure the method raises NotImplementedError.
self.assertRaises(NotImplementedError,
self.connection.plug_vifs,
instance=utils.get_test_instance(),
network_info=None)
def test_unplug_vifs(self):
# Check to make sure the method raises NotImplementedError.
self.assertRaises(NotImplementedError,
self.connection.unplug_vifs,
instance=utils.get_test_instance(),
network_info=None)
def test_create_container(self, image_info=None):
instance_href = utils.get_test_instance()
if image_info is None:
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
self.connection.spawn(self.context, instance_href, image_info,
'fake_files', 'fake_password')
self._assert_cpu_shares(instance_href)
def test_create_container_vcpus_2(self, image_info=None):
flavor = utils.get_test_flavor(options={
'name': 'vcpu_2',
'flavorid': 'vcpu_2',
'vcpus': 2
})
instance_href = utils.get_test_instance(flavor=flavor)
if image_info is None:
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
self.connection.spawn(self.context, instance_href, image_info,
'fake_files', 'fake_password')
self._assert_cpu_shares(instance_href, vcpus=2)
def _assert_cpu_shares(self, instance_href, vcpus=4):
container_id = self.connection.find_container_by_name(
instance_href['name']).get('id')
container_info = self.connection.docker.inspect_container(container_id)
self.assertEqual(vcpus * 1024, container_info['Config']['CpuShares'])
def test_create_container_wrong_image(self):
instance_href = utils.get_test_instance()
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'invalid_format'
self.assertRaises(exception.InstanceDeployFailure,
self.test_create_container,
image_info)
@mock.patch.object(network, 'teardown_network')
@mock.patch.object(nova.virt.docker.driver.DockerDriver,
'find_container_by_name', return_value={'id': 'fake_id'})
def test_destroy_container(self, byname_mock, teardown_mock):
instance = utils.get_test_instance()
self.connection.destroy(self.context, instance, 'fake_networkinfo')
byname_mock.assert_called_once_with(instance['name'])
teardown_mock.assert_called_with('fake_id')
def test_get_memory_limit_from_sys_meta_in_object(self):
instance = utils.get_test_instance(obj=True)
limit = self.connection._get_memory_limit_bytes(instance)
self.assertEqual(2048 * units.Mi, limit)
def test_get_memory_limit_from_sys_meta_in_db_instance(self):
instance = utils.get_test_instance(obj=False)
limit = self.connection._get_memory_limit_bytes(instance)
self.assertEqual(2048 * units.Mi, limit)
| [] |
l04m33/pyx | pyx/tests/test_http.py | b70efec605832ba3c7079e991584db3f5d1da8cb | import unittest
import unittest.mock as mock
import asyncio
import pyx.http as http
def create_dummy_message():
msg = http.HttpMessage(None)
msg.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Cookie', 'a'),
http.HttpHeader('Cookie', 'b'),
]
return msg
def create_dummy_connection():
loop = asyncio.get_event_loop()
reader = asyncio.StreamReader(loop=loop)
@asyncio.coroutine
def dummy_drain():
yield from asyncio.sleep(0.001)
writer = mock.Mock(spec=asyncio.StreamWriter)
writer.attach_mock(mock.Mock(wraps=dummy_drain), 'drain')
conn = http.HttpConnection(reader, writer)
return conn
def create_dummy_request():
conn = create_dummy_connection()
req = http.HttpRequest(conn)
return req
class TestHttpMessage(unittest.TestCase):
def test_get_header(self):
msg = create_dummy_message()
self.assertEqual(msg.get_header("server"), ["Pyx"])
self.assertEqual(msg.get_header("SERVER"), ["Pyx"])
self.assertEqual(msg.get_header("pragma"), [])
self.assertEqual(msg.get_header("cookie"), ["a", "b"])
self.assertEqual(msg.get_first_header("cookie"), "a")
self.assertTrue(msg.get_first_header("pragma") is None)
def test_write_headers(self):
msg = create_dummy_message()
self.assertEqual(msg.write_headers(),
['Server: Pyx', 'Cookie: a', 'Cookie: b'])
msg.headers = []
self.assertEqual(msg.write_headers(), [])
class TestHttpRequest(unittest.TestCase):
def test_parse_req_line(self):
req = create_dummy_request()
req._parse_req_line(b'POST / HTTP/1.1\r\n')
self.assertEqual(req.method, 'POST')
self.assertEqual(req.path, '/')
self.assertTrue(req.query is None)
self.assertEqual(req.protocol, 'HTTP')
self.assertEqual(req.version, (1, 1))
req._parse_req_line(
b'GET /some/path?some=query&some_other=query HTTP/1.1\r\n')
self.assertEqual(req.method, 'GET')
self.assertEqual(req.path, '/some/path')
self.assertEqual(req.query, 'some=query&some_other=query')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'GET /\r\n')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'GET / GARBAGE\r\n')
req._parse_req_line(b'GET / HTTP/1\r\n')
self.assertEqual(req.version, (1, 0))
def test_parse_header(self):
req = create_dummy_request()
req._parse_header(b'Server: Pyx\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', 'Pyx')])
req.headers = []
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b'Server\r\n')
req.headers = []
req._parse_header(b'Server:\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', '')])
req.headers = []
req._parse_header(b'Server: \r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', '')])
req.headers = []
req._parse_header(b'Host: some.badasshost.com:8080\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Host', 'some.badasshost.com:8080')])
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b': pyx\r\n')
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b' : pyx')
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b' \t : pyx')
def test_parse(self):
loop = asyncio.get_event_loop()
conn = create_dummy_connection()
reader = conn.reader
reader.feed_data(
b'GET /?q=p&s=t HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: Keep-Alive\r\n'
b'Pragma: Test\r\n'
b' : Test\r\n'
b'\r\n')
req = loop.run_until_complete(http.HttpRequest.parse(conn))
self.assertEqual(req.method, 'GET')
self.assertEqual(req.path, '/')
self.assertEqual(req.query, 'q=p&s=t')
self.assertEqual(req.protocol, 'HTTP')
self.assertEqual(req.version, (1, 1))
self.assertEqual(req.headers,
[
http.HttpHeader('Host', 'localhost'),
http.HttpHeader('Connection', 'Keep-Alive'),
http.HttpHeader('Pragma', 'Test'),
])
def test_respond(self):
req = create_dummy_request()
req.version = (1, 1)
resp = req.respond(200)
self.assertEqual(resp.code, 200)
self.assertEqual(resp.version, (1, 1))
req.version = (1, 0)
resp = req.respond(400)
self.assertEqual(resp.code, 400)
self.assertEqual(resp.version, (1, 0))
class TestHttpResponse(unittest.TestCase):
def test_write(self):
resp = http.HttpResponse(200, None)
resp.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Connection', 'keep-alive')
]
self.assertEqual(resp.write(),
['HTTP/1.1 200 OK',
'Server: Pyx',
'Connection: keep-alive',
'\r\n'])
self.assertEqual(str(resp),
'HTTP/1.1 200 OK\r\n'
'Server: Pyx\r\n'
'Connection: keep-alive\r\n'
'\r\n')
def test_send(self):
loop = asyncio.get_event_loop()
req = create_dummy_request()
resp = req.respond(200)
self.assertEqual(resp.code, 200)
self.assertFalse(req.responded)
resp.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Content-Length', '100'),
http.HttpHeader('Content-Type', 'text/plain'),
]
loop.run_until_complete(resp.send())
resp.connection.writer.write.assert_called_with(str(resp).encode())
self.assertTrue(req.responded)
def test_send_body(self):
loop = asyncio.get_event_loop()
req = create_dummy_request()
resp = req.respond(200)
loop.run_until_complete(resp.send())
self.assertTrue(req.responded)
loop.run_until_complete(resp.send_body(b'Yes, this is the body.'))
resp.connection.writer.write.assert_called_with(b'Yes, this is the body.')
loop.run_until_complete(resp.send_body('This is another string body.'))
resp.connection.writer.write.assert_called_with(b'This is another string body.')
class DummyResource(http.UrlResource):
def get_child(self, key):
if key == 'hello':
return self
elif key == "static":
return http.StaticRootResource('.')
else:
raise http.HttpError(404, '{} not found'.format(key))
class TestUrlResource(unittest.TestCase):
def test_traverse(self):
res = DummyResource()
self.assertEqual(res.traverse(''), res)
self.assertEqual(res.traverse('/'), res)
self.assertEqual(res.traverse('/hello'), res)
with self.assertRaises(http.HttpError):
res.traverse('/does/not/exist')
sres = res.traverse('/static')
self.assertEqual(sres.root, '.')
self.assertEqual(sres._build_real_path(), '.')
sres = res.traverse('/static/')
self.assertEqual(sres._build_real_path(), '.')
sres = res.traverse('/static/some/path')
self.assertEqual(sres._build_real_path(), './some/path')
def test_not_implemented(self):
res = http.UrlResource()
with self.assertRaises(NotImplementedError):
res.traverse('/hello')
req = create_dummy_request()
with self.assertRaises(NotImplementedError):
res.handle_request(req)
class TestStaticRootResource(unittest.TestCase):
def test_build_real_path(self):
res = http.StaticRootResource('local_root')
res = res.traverse('/some/long/path/where/ever/it/leads/')
self.assertEqual(res._build_real_path(),
'local_root/some/long/path/where/ever/it/leads')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/../dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/../../dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/%2e%2e%2f%2e%2e/dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
| [((8, 10, 8, 32), 'pyx.http.HttpMessage', ({(8, 27, 8, 31): 'None'}, {}), '(None)', True, 'import pyx.http as http\n'), ((18, 11, 18, 35), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((20, 13, 20, 44), 'asyncio.StreamReader', 'asyncio.StreamReader', (), '', False, 'import asyncio\n'), ((25, 13, 25, 49), 'unittest.mock.Mock', 'mock.Mock', (), '', True, 'import unittest.mock as mock\n'), ((28, 11, 28, 46), 'pyx.http.HttpConnection', 'http.HttpConnection', ({(28, 31, 28, 37): 'reader', (28, 39, 28, 45): 'writer'}, {}), '(reader, writer)', True, 'import pyx.http as http\n'), ((34, 10, 34, 32), 'pyx.http.HttpRequest', 'http.HttpRequest', ({(34, 27, 34, 31): 'conn'}, {}), '(conn)', True, 'import pyx.http as http\n'), ((10, 8, 10, 40), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(10, 24, 10, 32): '"""Server"""', (10, 34, 10, 39): '"""Pyx"""'}, {}), "('Server', 'Pyx')", True, 'import pyx.http as http\n'), ((11, 8, 11, 38), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(11, 24, 11, 32): '"""Cookie"""', (11, 34, 11, 37): '"""a"""'}, {}), "('Cookie', 'a')", True, 'import pyx.http as http\n'), ((12, 8, 12, 38), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(12, 24, 12, 32): '"""Cookie"""', (12, 34, 12, 37): '"""b"""'}, {}), "('Cookie', 'b')", True, 'import pyx.http as http\n'), ((26, 23, 26, 51), 'unittest.mock.Mock', 'mock.Mock', (), '', True, 'import unittest.mock as mock\n'), ((120, 15, 120, 39), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((162, 15, 162, 43), 'pyx.http.HttpResponse', 'http.HttpResponse', ({(162, 33, 162, 36): '200', (162, 38, 162, 42): 'None'}, {}), '(200, None)', True, 'import pyx.http as http\n'), ((179, 15, 179, 39), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((195, 15, 195, 39), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((240, 14, 240, 32), 'pyx.http.UrlResource', 'http.UrlResource', ({}, {}), '()', True, 'import pyx.http as http\n'), ((251, 14, 251, 51), 'pyx.http.StaticRootResource', 'http.StaticRootResource', ({(251, 38, 251, 50): '"""local_root"""'}, {}), "('local_root')", True, 'import pyx.http as http\n'), ((256, 14, 256, 51), 'pyx.http.StaticRootResource', 'http.StaticRootResource', ({(256, 38, 256, 50): '"""local_root"""'}, {}), "('local_root')", True, 'import pyx.http as http\n'), ((261, 14, 261, 51), 'pyx.http.StaticRootResource', 'http.StaticRootResource', ({(261, 38, 261, 50): '"""local_root"""'}, {}), "('local_root')", True, 'import pyx.http as http\n'), ((266, 14, 266, 51), 'pyx.http.StaticRootResource', 'http.StaticRootResource', ({(266, 38, 266, 50): '"""local_root"""'}, {}), "('local_root')", True, 'import pyx.http as http\n'), ((24, 19, 24, 39), 'asyncio.sleep', 'asyncio.sleep', ({(24, 33, 24, 38): '(0.001)'}, {}), '(0.001)', False, 'import asyncio\n'), ((132, 38, 132, 66), 'pyx.http.HttpRequest.parse', 'http.HttpRequest.parse', ({(132, 61, 132, 65): 'conn'}, {}), '(conn)', True, 'import pyx.http as http\n'), ((164, 12, 164, 44), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(164, 28, 164, 36): '"""Server"""', (164, 38, 164, 43): '"""Pyx"""'}, {}), "('Server', 'Pyx')", True, 'import pyx.http as http\n'), ((165, 12, 165, 55), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(165, 28, 165, 40): '"""Connection"""', (165, 42, 165, 54): '"""keep-alive"""'}, {}), "('Connection', 'keep-alive')", True, 'import pyx.http as http\n'), ((186, 12, 186, 44), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(186, 28, 186, 36): '"""Server"""', (186, 38, 186, 43): '"""Pyx"""'}, {}), "('Server', 'Pyx')", True, 'import pyx.http as http\n'), ((187, 12, 187, 52), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(187, 28, 187, 44): '"""Content-Length"""', (187, 46, 187, 51): '"""100"""'}, {}), "('Content-Length', '100')", True, 'import pyx.http as http\n'), ((188, 12, 188, 57), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(188, 28, 188, 42): '"""Content-Type"""', (188, 44, 188, 56): '"""text/plain"""'}, {}), "('Content-Type', 'text/plain')", True, 'import pyx.http as http\n'), ((92, 39, 92, 71), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(92, 55, 92, 63): '"""Server"""', (92, 65, 92, 70): '"""Pyx"""'}, {}), "('Server', 'Pyx')", True, 'import pyx.http as http\n'), ((100, 39, 100, 68), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(100, 55, 100, 63): '"""Server"""', (100, 65, 100, 67): '""""""'}, {}), "('Server', '')", True, 'import pyx.http as http\n'), ((104, 39, 104, 68), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(104, 55, 104, 63): '"""Server"""', (104, 65, 104, 67): '""""""'}, {}), "('Server', '')", True, 'import pyx.http as http\n'), ((108, 39, 108, 90), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(108, 55, 108, 61): '"""Host"""', (108, 63, 108, 89): '"""some.badasshost.com:8080"""'}, {}), "('Host', 'some.badasshost.com:8080')", True, 'import pyx.http as http\n'), ((141, 29, 141, 65), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(141, 45, 141, 51): '"""Host"""', (141, 53, 141, 64): '"""localhost"""'}, {}), "('Host', 'localhost')", True, 'import pyx.http as http\n'), ((142, 29, 142, 72), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(142, 45, 142, 57): '"""Connection"""', (142, 59, 142, 71): '"""Keep-Alive"""'}, {}), "('Connection', 'Keep-Alive')", True, 'import pyx.http as http\n'), ((143, 29, 143, 62), 'pyx.http.HttpHeader', 'http.HttpHeader', ({(143, 45, 143, 53): '"""Pragma"""', (143, 55, 143, 61): '"""Test"""'}, {}), "('Pragma', 'Test')", True, 'import pyx.http as http\n'), ((214, 19, 214, 47), 'pyx.http.StaticRootResource', 'http.StaticRootResource', ({(214, 43, 214, 46): '"""."""'}, {}), "('.')", True, 'import pyx.http as http\n')] |
kidosoft/splinter | tests/test_webdriver_chrome.py | 6d5052fd73c0a626299574cea76924e367c67faa | # -*- coding: utf-8 -*-
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import unittest
from splinter import Browser
from .fake_webapp import EXAMPLE_APP
from .base import WebDriverTests
from selenium.common.exceptions import WebDriverException
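# Probe helper: reports whether Chrome/chromedriver can actually be started (not referenced by the tests below).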
def chrome_installed():
try:
Browser("chrome")
except WebDriverException:
return False
return True
class ChromeBrowserTest(WebDriverTests, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser("chrome")
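        # one shared browser instance per test class; it is quit in tearDownClass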
@classmethod
def tearDownClass(cls):
cls.browser.quit()
def setUp(self):
self.browser.visit(EXAMPLE_APP)
def test_attach_file(self):
"should provide a way to change file field value"
file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'mockfile.txt'
)
self.browser.attach_file('file', file_path)
self.browser.find_by_name('upload').click()
html = self.browser.html
self.assertIn('text/plain', html)
        self.assertIn(open(file_path).read(), html)  # html is a str, so compare the file's text rather than bytes
def test_should_support_with_statement(self):
with Browser('chrome') as internet:
pass
class ChromeBrowserFullscreenTest(WebDriverTests, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser("chrome", fullscreen=True)
@classmethod
def tearDownClass(cls):
cls.browser.quit()
def setUp(self):
self.browser.visit(EXAMPLE_APP)
def test_should_support_with_statement(self):
with Browser('chrome', fullscreen=True) as internet:
pass
| [((18, 8, 18, 25), 'splinter.Browser', 'Browser', ({(18, 16, 18, 24): '"""chrome"""'}, {}), "('chrome')", False, 'from splinter import Browser\n'), ((28, 22, 28, 39), 'splinter.Browser', 'Browser', ({(28, 30, 28, 38): '"""chrome"""'}, {}), "('chrome')", False, 'from splinter import Browser\n'), ((59, 22, 59, 56), 'splinter.Browser', 'Browser', (), '', False, 'from splinter import Browser\n'), ((51, 13, 51, 30), 'splinter.Browser', 'Browser', ({(51, 21, 51, 29): '"""chrome"""'}, {}), "('chrome')", False, 'from splinter import Browser\n'), ((69, 13, 69, 47), 'splinter.Browser', 'Browser', (), '', False, 'from splinter import Browser\n'), ((40, 28, 40, 53), 'os.path.dirname', 'os.path.dirname', ({(40, 44, 40, 52): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
DuskXi/ArkX | main.py | 7b416ae0c4ec2b383c6f414ed475930dd228909f | import os
import json
from File.file import File
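# silence TensorFlow's C++ logging (3 = errors only); must be set before TensorFlow is imported by the modules below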
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def fileRead(fileName, encoding='utf-8'):
with open(fileName, encoding=encoding) as f:
return f.read()
def main():
from Automation.distributor import Distributor
from Performance import recoder
from WebInterface import web
modelConfig = json.loads(fileRead("config/model.json"))
labelsName = json.loads(fileRead("config/labelsName.json"))
config = json.loads(fileRead("config/config.json"))
# file = File()
classifyModel = modelConfig["imageClassificationModel"]
# if not file.mergedFile(classifyModel["filePath"], classifyModel["fileName"], classifyModel["files"]):
# print("文件合并失败")
# print("回车退出")
# input()
# exit(0)
recoder.Recoder.debug = False
recoder.Recoder.debugSleepingTime = 60 * 60
recoder.Recoder.initDataSet([modelConfig["objectDetectionModel"]["modelName"], modelConfig["addSanityModel"]["modelName"]],
[classifyModel["modelName"]])
# modelConfig["imageClassificationModel"]["filePath"] = os.path.join(classifyModel["filePath"], classifyModel["fileName"])
distributor = Distributor(modelConfig, config["adb_path"], labelsName)
web.run(distributor, config)
if __name__ == "__main__":
main()
| [((34, 4, 35, 61), 'Performance.recoder.Recoder.initDataSet', 'recoder.Recoder.initDataSet', ({(34, 32, 34, 126): "[modelConfig['objectDetectionModel']['modelName'], modelConfig[\n 'addSanityModel']['modelName']]", (35, 32, 35, 60): "[classifyModel['modelName']]"}, {}), "([modelConfig['objectDetectionModel'][\n 'modelName'], modelConfig['addSanityModel']['modelName']], [\n classifyModel['modelName']])", False, 'from Performance import recoder\n'), ((38, 18, 38, 74), 'Automation.distributor.Distributor', 'Distributor', ({(38, 30, 38, 41): 'modelConfig', (38, 43, 38, 61): "config['adb_path']", (38, 63, 38, 73): 'labelsName'}, {}), "(modelConfig, config['adb_path'], labelsName)", False, 'from Automation.distributor import Distributor\n'), ((39, 4, 39, 32), 'WebInterface.web.run', 'web.run', ({(39, 12, 39, 23): 'distributor', (39, 25, 39, 31): 'config'}, {}), '(distributor, config)', False, 'from WebInterface import web\n')] |
cprogrammer1994/miniglm | tests/test_simple.py | 696764ff200dd106dd533264ff45a060d5f7b230 | import struct
import numpy as np
import pytest
import miniglm
def test_add_vec_vec():
res = miniglm.add((1.0, 2.0, 3.0), (1.5, 1.8, 1.2))
np.testing.assert_almost_equal(res, (2.5, 3.8, 4.2))
assert type(res) is tuple
def test_add_vec_scalar():
res = miniglm.add((1.0, 2.0, 3.0), 0.5)
np.testing.assert_almost_equal(res, (1.5, 2.5, 3.5))
assert type(res) is tuple
def test_sub_vec_vec():
res = miniglm.sub((5.0, 6.0, 7.0), (1.5, 1.8, 1.2))
np.testing.assert_almost_equal(res, (3.5, 4.2, 5.8))
assert type(res) is tuple
def test_sub_vec_scalar():
res = miniglm.sub((5.0, 6.0, 7.0), 1.5)
np.testing.assert_almost_equal(res, (3.5, 4.5, 5.5))
assert type(res) is tuple
def test_mul_vec_vec():
res = miniglm.mul((5.0, 6.0, 7.0), (1.5, 1.8, 1.2))
np.testing.assert_almost_equal(res, (7.5, 10.8, 8.4))
assert type(res) is tuple
def test_mul_vec_scalar():
res = miniglm.mul((1.0, 2.0, 3.0), 2.0)
np.testing.assert_almost_equal(res, (2.0, 4.0, 6.0))
assert type(res) is tuple
def test_cross():
res = miniglm.cross((2.0, 3.5, 7.1), (0.2, 10.0, 3.3))
np.testing.assert_almost_equal(res, (-59.45, -5.18, 19.3))
assert type(res) is tuple
def test_dot_vec():
res = miniglm.dot((2.0, 3.5, 7.1), (0.2, 10.0, 3.3))
np.testing.assert_almost_equal(res, 58.83)
def test_dot_quat():
    res = miniglm.dot((2.0, 3.5, 7.1, 0.5), (0.2, 10.0, 3.3, 0.5))  # 4-component quaternions (assumes dot accepts quats)
    np.testing.assert_almost_equal(res, 59.08)
def test_mix_vec():
res = miniglm.mix((2.5, 3.4, 4.6), (7.2, 1.1, 3.2), 0.2)
np.testing.assert_almost_equal(res, (3.44, 2.94, 4.32))
assert type(res) is tuple
def test_mix_scalar():
res = miniglm.mix(1.0, 3.0, 0.5)
np.testing.assert_almost_equal(res, 2.0)
def test_rotate():
res = miniglm.rotate(miniglm.pi / 3.0, miniglm.norm((0.48, 0.60, 0.64)))
expected = (0.24, 0.3, 0.32, 0.8660254037844387)
np.testing.assert_almost_equal(res, expected)
assert type(res) is tuple
def test_split_quat():
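    # split() inverts rotate(): it recovers the (angle, axis) pair from a quaternion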
quat = (0.24, 0.3, 0.32, 0.8660254037844387)
angle, axis = miniglm.split(quat)
np.testing.assert_almost_equal(angle, miniglm.pi / 3.0)
np.testing.assert_almost_equal(axis, (0.48, 0.60, 0.64))
assert type(axis) is tuple
def test_rotate_x_90_deg():
res = miniglm.rotate(miniglm.pi / 2.0, (1.0, 0.0, 0.0))
np.testing.assert_almost_equal(res, (np.sqrt(2.0) / 2.0, 0.0, 0.0, np.sqrt(2.0) / 2.0))
def test_rotate_y_90_deg():
res = miniglm.rotate(miniglm.pi / 2.0, (0.0, 1.0, 0.0))
np.testing.assert_almost_equal(res, (0.0, np.sqrt(2.0) / 2.0, 0.0, np.sqrt(2.0) / 2.0))
def test_rotate_z_90_deg():
res = miniglm.rotate(miniglm.pi / 2.0, (0.0, 0.0, 1.0))
np.testing.assert_almost_equal(res, (0.0, 0.0, np.sqrt(2.0) / 2.0, np.sqrt(2.0) / 2.0))
def test_norm_vec():
res = miniglm.norm((48.0, 60.0, 64.0))
expected = (0.48, 0.60, 0.64)
np.testing.assert_almost_equal(res, expected)
assert type(res) is tuple
def test_norm_quat():
res = miniglm.norm((2.0, 4.0, 8.0, 4.0))
expected = (0.2, 0.4, 0.8, 0.4)
np.testing.assert_almost_equal(res, expected)
assert type(res) is tuple
def test_norm_mat():
mat = (
0.074, 0.962, -0.259,
-0.518, 0.259, 0.814,
0.851, 0.074, 0.518,
)
res = miniglm.norm(mat)
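    # norm() of a 9-tuple orthonormalizes the 3x3 matrix; the asserts below check unit determinant and pairwise-orthogonal rows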
np.testing.assert_almost_equal(miniglm.det(res), 1.0)
np.testing.assert_almost_equal(miniglm.cross(res[0:3], res[3:6]), res[6:9])
np.testing.assert_almost_equal(miniglm.dot(res[0:3], res[3:6]), 0.0)
np.testing.assert_almost_equal(miniglm.dot(res[3:6], res[6:9]), 0.0)
np.testing.assert_almost_equal(miniglm.dot(res[0:3], res[6:9]), 0.0)
assert type(res) is tuple
def test_cast():
quat = (0.2, 0.4, 0.8, 0.4)
mat = (-0.6, 0.8, 0.0, -0.48, -0.36, 0.8, 0.64, 0.48, 0.6)
np.testing.assert_almost_equal(miniglm.cast(quat), mat)
np.testing.assert_almost_equal(miniglm.cast(mat), quat)
np.testing.assert_almost_equal(miniglm.cast(miniglm.cast(quat)), quat)
np.testing.assert_almost_equal(miniglm.cast(miniglm.cast(mat)), mat)
def test_swizzle_vec():
res = miniglm.swizzle((1.0, 2.0, 3.0), 'yxz')
np.testing.assert_almost_equal(res, (2.0, 1.0, 3.0))
def test_swizzle_quat():
res = miniglm.swizzle((0.1, 0.7, 0.5, 0.5), 'wxyz')
np.testing.assert_almost_equal(res, (0.5, 0.1, 0.7, 0.5))
def test_pack_scalar():
assert miniglm.pack(1.75) == struct.pack('f', 1.75)
def test_pack_vec():
vec = (1.0, 2.0, 3.0)
assert miniglm.pack(vec) == struct.pack('fff', *vec)
def test_pack_quat():
quat = (0.1, 0.7, 0.5, 0.5)
assert miniglm.pack(quat) == struct.pack('ffff', *quat)
def test_pack_mat():
mat = (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
assert miniglm.pack(mat) == struct.pack('fffffffff', *mat)
| [((10, 10, 10, 55), 'miniglm.add', 'miniglm.add', ({(10, 22, 10, 37): '(1.0, 2.0, 3.0)', (10, 39, 10, 54): '(1.5, 1.8, 1.2)'}, {}), '((1.0, 2.0, 3.0), (1.5, 1.8, 1.2))', False, 'import miniglm\n'), ((11, 4, 11, 56), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(11, 35, 11, 38): 'res', (11, 40, 11, 55): '(2.5, 3.8, 4.2)'}, {}), '(res, (2.5, 3.8, 4.2))', True, 'import numpy as np\n'), ((16, 10, 16, 43), 'miniglm.add', 'miniglm.add', ({(16, 22, 16, 37): '(1.0, 2.0, 3.0)', (16, 39, 16, 42): '0.5'}, {}), '((1.0, 2.0, 3.0), 0.5)', False, 'import miniglm\n'), ((17, 4, 17, 56), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(17, 35, 17, 38): 'res', (17, 40, 17, 55): '(1.5, 2.5, 3.5)'}, {}), '(res, (1.5, 2.5, 3.5))', True, 'import numpy as np\n'), ((22, 10, 22, 55), 'miniglm.sub', 'miniglm.sub', ({(22, 22, 22, 37): '(5.0, 6.0, 7.0)', (22, 39, 22, 54): '(1.5, 1.8, 1.2)'}, {}), '((5.0, 6.0, 7.0), (1.5, 1.8, 1.2))', False, 'import miniglm\n'), ((23, 4, 23, 56), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(23, 35, 23, 38): 'res', (23, 40, 23, 55): '(3.5, 4.2, 5.8)'}, {}), '(res, (3.5, 4.2, 5.8))', True, 'import numpy as np\n'), ((28, 10, 28, 43), 'miniglm.sub', 'miniglm.sub', ({(28, 22, 28, 37): '(5.0, 6.0, 7.0)', (28, 39, 28, 42): '1.5'}, {}), '((5.0, 6.0, 7.0), 1.5)', False, 'import miniglm\n'), ((29, 4, 29, 56), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(29, 35, 29, 38): 'res', (29, 40, 29, 55): '(3.5, 4.5, 5.5)'}, {}), '(res, (3.5, 4.5, 5.5))', True, 'import numpy as np\n'), ((34, 10, 34, 55), 'miniglm.mul', 'miniglm.mul', ({(34, 22, 34, 37): '(5.0, 6.0, 7.0)', (34, 39, 34, 54): '(1.5, 1.8, 1.2)'}, {}), '((5.0, 6.0, 7.0), (1.5, 1.8, 1.2))', False, 'import miniglm\n'), ((35, 4, 35, 57), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(35, 35, 35, 38): 'res', (35, 40, 35, 56): '(7.5, 10.8, 8.4)'}, {}), '(res, (7.5, 10.8, 8.4))', True, 'import numpy as np\n'), ((40, 10, 40, 43), 'miniglm.mul', 'miniglm.mul', ({(40, 22, 40, 37): '(1.0, 2.0, 3.0)', (40, 39, 40, 42): '2.0'}, {}), '((1.0, 2.0, 3.0), 2.0)', False, 'import miniglm\n'), ((41, 4, 41, 56), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(41, 35, 41, 38): 'res', (41, 40, 41, 55): '(2.0, 4.0, 6.0)'}, {}), '(res, (2.0, 4.0, 6.0))', True, 'import numpy as np\n'), ((46, 10, 46, 58), 'miniglm.cross', 'miniglm.cross', ({(46, 24, 46, 39): '(2.0, 3.5, 7.1)', (46, 41, 46, 57): '(0.2, 10.0, 3.3)'}, {}), '((2.0, 3.5, 7.1), (0.2, 10.0, 3.3))', False, 'import miniglm\n'), ((47, 4, 47, 62), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(47, 35, 47, 38): 'res', (47, 40, 47, 61): '(-59.45, -5.18, 19.3)'}, {}), '(res, (-59.45, -5.18, 19.3))', True, 'import numpy as np\n'), ((52, 10, 52, 56), 'miniglm.dot', 'miniglm.dot', ({(52, 22, 52, 37): '(2.0, 3.5, 7.1)', (52, 39, 52, 55): '(0.2, 10.0, 3.3)'}, {}), '((2.0, 3.5, 7.1), (0.2, 10.0, 3.3))', False, 'import miniglm\n'), ((53, 4, 53, 46), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(53, 35, 53, 38): 'res', (53, 40, 53, 45): '(58.83)'}, {}), '(res, 58.83)', True, 'import numpy as np\n'), ((57, 10, 57, 56), 'miniglm.dot', 'miniglm.dot', ({(57, 22, 57, 37): '(2.0, 3.5, 7.1)', (57, 39, 57, 55): '(0.2, 10.0, 3.3)'}, {}), '((2.0, 3.5, 7.1), (0.2, 10.0, 3.3))', False, 'import miniglm\n'), ((58, 4, 58, 46), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(58, 35, 58, 38): 'res', 
(58, 40, 58, 45): '(58.83)'}, {}), '(res, 58.83)', True, 'import numpy as np\n'), ((62, 10, 62, 60), 'miniglm.mix', 'miniglm.mix', ({(62, 22, 62, 37): '(2.5, 3.4, 4.6)', (62, 39, 62, 54): '(7.2, 1.1, 3.2)', (62, 56, 62, 59): '0.2'}, {}), '((2.5, 3.4, 4.6), (7.2, 1.1, 3.2), 0.2)', False, 'import miniglm\n'), ((63, 4, 63, 59), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(63, 35, 63, 38): 'res', (63, 40, 63, 58): '(3.44, 2.94, 4.32)'}, {}), '(res, (3.44, 2.94, 4.32))', True, 'import numpy as np\n'), ((68, 10, 68, 36), 'miniglm.mix', 'miniglm.mix', ({(68, 22, 68, 25): '1.0', (68, 27, 68, 30): '3.0', (68, 32, 68, 35): '0.5'}, {}), '(1.0, 3.0, 0.5)', False, 'import miniglm\n'), ((69, 4, 69, 44), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(69, 35, 69, 38): 'res', (69, 40, 69, 43): '(2.0)'}, {}), '(res, 2.0)', True, 'import numpy as np\n'), ((75, 4, 75, 49), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(75, 35, 75, 38): 'res', (75, 40, 75, 48): 'expected'}, {}), '(res, expected)', True, 'import numpy as np\n'), ((81, 18, 81, 37), 'miniglm.split', 'miniglm.split', ({(81, 32, 81, 36): 'quat'}, {}), '(quat)', False, 'import miniglm\n'), ((82, 4, 82, 59), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(82, 35, 82, 40): 'angle', (82, 42, 82, 58): '(miniglm.pi / 3.0)'}, {}), '(angle, miniglm.pi / 3.0)', True, 'import numpy as np\n'), ((83, 4, 83, 60), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(83, 35, 83, 39): 'axis', (83, 41, 83, 59): '(0.48, 0.6, 0.64)'}, {}), '(axis, (0.48, 0.6, 0.64))', True, 'import numpy as np\n'), ((88, 10, 88, 59), 'miniglm.rotate', 'miniglm.rotate', ({(88, 25, 88, 41): 'miniglm.pi / 2.0', (88, 43, 88, 58): '(1.0, 0.0, 0.0)'}, {}), '(miniglm.pi / 2.0, (1.0, 0.0, 0.0))', False, 'import miniglm\n'), ((93, 10, 93, 59), 'miniglm.rotate', 'miniglm.rotate', ({(93, 25, 93, 41): 'miniglm.pi / 2.0', (93, 43, 93, 58): '(0.0, 1.0, 0.0)'}, {}), '(miniglm.pi / 2.0, (0.0, 1.0, 0.0))', False, 'import miniglm\n'), ((98, 10, 98, 59), 'miniglm.rotate', 'miniglm.rotate', ({(98, 25, 98, 41): 'miniglm.pi / 2.0', (98, 43, 98, 58): '(0.0, 0.0, 1.0)'}, {}), '(miniglm.pi / 2.0, (0.0, 0.0, 1.0))', False, 'import miniglm\n'), ((103, 10, 103, 42), 'miniglm.norm', 'miniglm.norm', ({(103, 23, 103, 41): '(48.0, 60.0, 64.0)'}, {}), '((48.0, 60.0, 64.0))', False, 'import miniglm\n'), ((105, 4, 105, 49), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(105, 35, 105, 38): 'res', (105, 40, 105, 48): 'expected'}, {}), '(res, expected)', True, 'import numpy as np\n'), ((110, 10, 110, 44), 'miniglm.norm', 'miniglm.norm', ({(110, 23, 110, 43): '(2.0, 4.0, 8.0, 4.0)'}, {}), '((2.0, 4.0, 8.0, 4.0))', False, 'import miniglm\n'), ((112, 4, 112, 49), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(112, 35, 112, 38): 'res', (112, 40, 112, 48): 'expected'}, {}), '(res, expected)', True, 'import numpy as np\n'), ((122, 10, 122, 27), 'miniglm.norm', 'miniglm.norm', ({(122, 23, 122, 26): 'mat'}, {}), '(mat)', False, 'import miniglm\n'), ((141, 10, 141, 49), 'miniglm.swizzle', 'miniglm.swizzle', ({(141, 26, 141, 41): '(1.0, 2.0, 3.0)', (141, 43, 141, 48): '"""yxz"""'}, {}), "((1.0, 2.0, 3.0), 'yxz')", False, 'import miniglm\n'), ((142, 4, 142, 56), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(142, 35, 142, 38): 'res', (142, 40, 142, 55): '(2.0, 1.0, 3.0)'}, {}), '(res, (2.0, 1.0, 3.0))', True, 'import numpy 
as np\n'), ((146, 10, 146, 55), 'miniglm.swizzle', 'miniglm.swizzle', ({(146, 26, 146, 46): '(0.1, 0.7, 0.5, 0.5)', (146, 48, 146, 54): '"""wxyz"""'}, {}), "((0.1, 0.7, 0.5, 0.5), 'wxyz')", False, 'import miniglm\n'), ((147, 4, 147, 61), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(147, 35, 147, 38): 'res', (147, 40, 147, 60): '(0.5, 0.1, 0.7, 0.5)'}, {}), '(res, (0.5, 0.1, 0.7, 0.5))', True, 'import numpy as np\n'), ((73, 43, 73, 75), 'miniglm.norm', 'miniglm.norm', ({(73, 56, 73, 74): '(0.48, 0.6, 0.64)'}, {}), '((0.48, 0.6, 0.64))', False, 'import miniglm\n'), ((123, 35, 123, 51), 'miniglm.det', 'miniglm.det', ({(123, 47, 123, 50): 'res'}, {}), '(res)', False, 'import miniglm\n'), ((124, 35, 124, 68), 'miniglm.cross', 'miniglm.cross', ({(124, 49, 124, 57): 'res[0:3]', (124, 59, 124, 67): 'res[3:6]'}, {}), '(res[0:3], res[3:6])', False, 'import miniglm\n'), ((125, 35, 125, 66), 'miniglm.dot', 'miniglm.dot', ({(125, 47, 125, 55): 'res[0:3]', (125, 57, 125, 65): 'res[3:6]'}, {}), '(res[0:3], res[3:6])', False, 'import miniglm\n'), ((126, 35, 126, 66), 'miniglm.dot', 'miniglm.dot', ({(126, 47, 126, 55): 'res[3:6]', (126, 57, 126, 65): 'res[6:9]'}, {}), '(res[3:6], res[6:9])', False, 'import miniglm\n'), ((127, 35, 127, 66), 'miniglm.dot', 'miniglm.dot', ({(127, 47, 127, 55): 'res[0:3]', (127, 57, 127, 65): 'res[6:9]'}, {}), '(res[0:3], res[6:9])', False, 'import miniglm\n'), ((134, 35, 134, 53), 'miniglm.cast', 'miniglm.cast', ({(134, 48, 134, 52): 'quat'}, {}), '(quat)', False, 'import miniglm\n'), ((135, 35, 135, 52), 'miniglm.cast', 'miniglm.cast', ({(135, 48, 135, 51): 'mat'}, {}), '(mat)', False, 'import miniglm\n'), ((151, 11, 151, 29), 'miniglm.pack', 'miniglm.pack', ({(151, 24, 151, 28): '(1.75)'}, {}), '(1.75)', False, 'import miniglm\n'), ((151, 33, 151, 55), 'struct.pack', 'struct.pack', ({(151, 45, 151, 48): '"""f"""', (151, 50, 151, 54): '(1.75)'}, {}), "('f', 1.75)", False, 'import struct\n'), ((156, 11, 156, 28), 'miniglm.pack', 'miniglm.pack', ({(156, 24, 156, 27): 'vec'}, {}), '(vec)', False, 'import miniglm\n'), ((156, 32, 156, 56), 'struct.pack', 'struct.pack', ({(156, 44, 156, 49): '"""fff"""', (156, 51, 156, 55): '*vec'}, {}), "('fff', *vec)", False, 'import struct\n'), ((161, 11, 161, 29), 'miniglm.pack', 'miniglm.pack', ({(161, 24, 161, 28): 'quat'}, {}), '(quat)', False, 'import miniglm\n'), ((161, 33, 161, 59), 'struct.pack', 'struct.pack', ({(161, 45, 161, 51): '"""ffff"""', (161, 53, 161, 58): '*quat'}, {}), "('ffff', *quat)", False, 'import struct\n'), ((166, 11, 166, 28), 'miniglm.pack', 'miniglm.pack', ({(166, 24, 166, 27): 'mat'}, {}), '(mat)', False, 'import miniglm\n'), ((166, 32, 166, 62), 'struct.pack', 'struct.pack', ({(166, 44, 166, 55): '"""fffffffff"""', (166, 57, 166, 61): '*mat'}, {}), "('fffffffff', *mat)", False, 'import struct\n'), ((136, 48, 136, 66), 'miniglm.cast', 'miniglm.cast', ({(136, 61, 136, 65): 'quat'}, {}), '(quat)', False, 'import miniglm\n'), ((137, 48, 137, 65), 'miniglm.cast', 'miniglm.cast', ({(137, 61, 137, 64): 'mat'}, {}), '(mat)', False, 'import miniglm\n'), ((89, 41, 89, 53), 'numpy.sqrt', 'np.sqrt', ({(89, 49, 89, 52): '(2.0)'}, {}), '(2.0)', True, 'import numpy as np\n'), ((89, 71, 89, 83), 'numpy.sqrt', 'np.sqrt', ({(89, 79, 89, 82): '(2.0)'}, {}), '(2.0)', True, 'import numpy as np\n'), ((94, 46, 94, 58), 'numpy.sqrt', 'np.sqrt', ({(94, 54, 94, 57): '(2.0)'}, {}), '(2.0)', True, 'import numpy as np\n'), ((94, 71, 94, 83), 'numpy.sqrt', 'np.sqrt', ({(94, 79, 94, 82): '(2.0)'}, {}), '(2.0)', 
True, 'import numpy as np\n'), ((99, 51, 99, 63), 'numpy.sqrt', 'np.sqrt', ({(99, 59, 99, 62): '(2.0)'}, {}), '(2.0)', True, 'import numpy as np\n'), ((99, 71, 99, 83), 'numpy.sqrt', 'np.sqrt', ({(99, 79, 99, 82): '(2.0)'}, {}), '(2.0)', True, 'import numpy as np\n')] |
konstantin1985/forum | flaskbb/plugins/news/views.py | 7d4de24ccc932e9764699d89c8cc9d210b7fac7f | # -*- coding: utf-8 -*-
from flask import Blueprint, redirect
from flaskbb.utils.helpers import render_template
from .forms import AddForm, DeleteForm
from .models import MyPost
from flaskbb.extensions import db
news = Blueprint("news", __name__, template_folder="templates")
def inject_news_link():
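    # template hook: renders the snippet that the plugin system injects into the forum navigation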
return render_template("navigation_snippet.html")
@news.route("/")
def index():
return render_template("index.html", newsposts = MyPost.query.all())
@news.route('/add', methods=['GET', 'POST'])
def add():
form = AddForm()
if form.validate_on_submit():
p = MyPost(name = form.name.data, text = form.text.data)
db.session.add(p)
db.session.commit()
return redirect('/news')
return render_template('add.html', form=form)
@news.route('/delete', methods=['GET', 'POST'])
def delete():
form = DeleteForm()
if form.validate_on_submit():
p = MyPost.query.filter(MyPost.name == form.name.data).first()
db.session.delete(p)
db.session.commit()
return redirect('/news')
return render_template('delete.html', form=form)
| [((9, 7, 9, 63), 'flask.Blueprint', 'Blueprint', (), '', False, 'from flask import Blueprint, redirect\n'), ((13, 11, 13, 53), 'flaskbb.utils.helpers.render_template', 'render_template', ({(13, 27, 13, 52): '"""navigation_snippet.html"""'}, {}), "('navigation_snippet.html')", False, 'from flaskbb.utils.helpers import render_template\n'), ((29, 11, 29, 49), 'flaskbb.utils.helpers.render_template', 'render_template', (), '', False, 'from flaskbb.utils.helpers import render_template\n'), ((39, 11, 39, 52), 'flaskbb.utils.helpers.render_template', 'render_template', (), '', False, 'from flaskbb.utils.helpers import render_template\n'), ((26, 8, 26, 25), 'flaskbb.extensions.db.session.add', 'db.session.add', ({(26, 23, 26, 24): 'p'}, {}), '(p)', False, 'from flaskbb.extensions import db\n'), ((27, 8, 27, 27), 'flaskbb.extensions.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from flaskbb.extensions import db\n'), ((28, 15, 28, 32), 'flask.redirect', 'redirect', ({(28, 24, 28, 31): '"""/news"""'}, {}), "('/news')", False, 'from flask import Blueprint, redirect\n'), ((36, 8, 36, 28), 'flaskbb.extensions.db.session.delete', 'db.session.delete', ({(36, 26, 36, 27): 'p'}, {}), '(p)', False, 'from flaskbb.extensions import db\n'), ((37, 8, 37, 27), 'flaskbb.extensions.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from flaskbb.extensions import db\n'), ((38, 15, 38, 32), 'flask.redirect', 'redirect', ({(38, 24, 38, 31): '"""/news"""'}, {}), "('/news')", False, 'from flask import Blueprint, redirect\n')] |
nkhetia31/stix-shifter | stix_shifter_modules/aws_athena/tests/stix_translation/test_aws_athena_json_to_stix.py | ace07581cb227fd35e450b2f8871475227a041d0 | from stix_shifter_utils.stix_translation.src.json_to_stix import json_to_stix_translator
from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers
from stix_shifter_modules.aws_athena.entry_point import EntryPoint
import unittest
MODULE = "aws_athena"
entry_point = EntryPoint()
map_data = entry_point.get_results_translator().map_data
data_source = {
"type": "identity",
"id": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"name": "aws_athena",
"identity_class": "events"
}
options = {}
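# default translate options; the custom-attribute tests below shadow this locally with {"unmapped_fallback": True}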
class TestAwsResultsToStix(unittest.TestCase):
"""
    Unit tests for translating AWS Athena query results into STIX objects
"""
@staticmethod
def get_first(itr, constraint):
"""
return the obj in the itr if constraint is true
"""
return next(
(obj for obj in itr if constraint(obj)),
None
)
@staticmethod
def get_first_of_type(itr, typ):
"""
to check whether the object belongs to respective stix object
"""
return TestAwsResultsToStix.get_first(itr, lambda o: isinstance(o, dict) and o.get('type') == typ)
def test_common_prop(self):
"""
to test the common stix object properties
"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "18.210.22.128",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "85.224.242.94",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "\u00d6rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding/"
"7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1. "
"Brute force attacks are used to gain unauthorized access to your instance by "
"guessing the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "ArcSight Logger"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
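        # translate the raw Athena row into a STIX bundle (identity object + observed-data)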
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
assert result_bundle['type'] == 'bundle'
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
assert result_bundle_identity['id'] == data_source['id']
assert result_bundle_identity['name'] == data_source['name']
assert result_bundle_identity['identity_class'] == data_source['identity_class']
observed_data = result_bundle_objects[1]
assert observed_data['id'] is not None
assert observed_data['type'] == "observed-data"
assert observed_data['created_by_ref'] == result_bundle_identity['id']
assert observed_data['created'] is not None
assert observed_data['modified'] is not None
assert observed_data['number_observed'] is not None
def test_vpc_flow_network_json_to_stix(self):
"""to test network stix object properties"""
data = {
"vpcflow": {
"account": 979326520502,
"interfaceid": "eni-04b762de832716892",
"sourceaddress": "89.248.172.85",
"destinationaddress": "172.31.62.249",
"sourceport": 58387,
"destinationport": 51289,
"protocol": "tcp",
"starttime": 1592547796,
"endtime": 1592547798,
"action": "REJECT",
"date": "2020-06-19",
"logstatus": "OK",
"numbytes": 40,
"region": "us-east-1",
"version": 2
}
}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
network_obj = TestAwsResultsToStix.get_first_of_type(objects.values(), 'network-traffic')
assert network_obj is not None, 'network-traffic object type not found'
assert network_obj.keys() == {'type', 'src_ref', 'dst_ref', 'src_port', 'dst_port', 'protocols', 'start', 'end'}
assert network_obj['type'] == 'network-traffic'
assert network_obj['src_ref'] == '1'
assert network_obj['dst_ref'] == '4'
assert network_obj['src_port'] == 58387
assert network_obj['dst_port'] == 51289
assert network_obj['protocols'] == ['tcp']
assert network_obj['start'] == '2020-06-19T06:23:16.000Z'
assert network_obj['end'] == '2020-06-19T06:23:18.000Z'
def test_vpc_flow_custom_attr_json_to_stix(self):
"""to test network stix object properties"""
data = {
"vpcflow": {
"account": 979326520502,
"interfaceid": "eni-04b762de832716892",
"sourceaddress": "89.248.172.85",
"destinationaddress": "172.31.62.249",
"sourceport": 58387,
"destinationport": 51289,
"protocol": "tcp",
"starttime": 1592547796,
"endtime": 1592547798,
"action": "REJECT",
"date": "2020-06-19",
"logstatus": "OK",
"numbytes": 40,
"region": "us-east-1",
"version": 2
}
}
options = {"unmapped_fallback": True}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
custom_object = TestAwsResultsToStix.get_first_of_type(objects.values(), 'x-aws-athena')
assert custom_object.keys() == {'type', 'interfaceid', 'date', 'logstatus', 'numbytes', 'region', 'version'}
assert custom_object['date'] == '2020-06-19'
assert custom_object['logstatus'] == 'OK'
assert custom_object['numbytes'] == 40
assert custom_object['region'] == 'us-east-1'
assert custom_object['version'] == 2
def test_guardduty_network_json_to_stix(self):
"""to test network stix object properties"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "18.210.22.128",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "85.224.242.94",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding"
"/7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1. "
"Brute force attacks are used to gain unauthorized access to your instance by "
"guessing the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "ArcSight Logger"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
network_obj = TestAwsResultsToStix.get_first_of_type(objects.values(), 'network-traffic')
assert network_obj is not None, 'network-traffic object type not found'
assert network_obj.keys() == {'type', 'dst_port', 'src_ref', 'dst_ref', 'src_port', 'protocols'}
assert network_obj['type'] == 'network-traffic'
assert network_obj['dst_port'] == 38420
assert network_obj['src_ref'] == '3'
assert network_obj['dst_ref'] == '9'
assert network_obj['src_port'] == 22
assert network_obj['protocols'] == ['tcp']
def test_guardduty_custom_attr_json_to_stix(self):
"""to test network stix object properties"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "18.210.22.128",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "85.224.242.94",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding/"
"7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1."
" Brute force attacks are used to gain unauthorized access to your instance by guessing "
"the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "ArcSight Logger"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
options = {"unmapped_fallback": True}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
custom_object = TestAwsResultsToStix.get_first_of_type(objects.values(), 'x-aws-athena')
assert custom_object.keys() == {'type', 'service_action_networkconnectionaction_remoteipdetails_country_countryname',
'finding_id', 'arn', 'createdat', 'partition', 'resource',
'schemaversion', 'service', 'updatedat'}
assert custom_object['arn'] == 'arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed' \
'494f3b7ca56acdc74df/finding/7ab9d1cb6248e05a0e419a79528761cb'
assert custom_object['finding_id'] == '7ab9d1cb6248e05a0e419a79528761cb'
assert custom_object['createdat'] == '2020-07-31T06:37:13.745Z'
assert custom_object['partition'] == 'aws'
assert custom_object['schemaversion'] == 2.0
assert custom_object['updatedat'] == '2020-09-12T09:25:34.086Z'
| [((7, 14, 7, 26), 'stix_shifter_modules.aws_athena.entry_point.EntryPoint', 'EntryPoint', ({}, {}), '()', False, 'from stix_shifter_modules.aws_athena.entry_point import EntryPoint\n'), ((131, 43, 131, 74), 'stix_shifter_utils.stix_translation.src.utils.transformer_utils.get_module_transformers', 'get_module_transformers', ({(131, 67, 131, 73): 'MODULE'}, {}), '(MODULE)', False, 'from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers\n'), ((171, 43, 171, 74), 'stix_shifter_utils.stix_translation.src.utils.transformer_utils.get_module_transformers', 'get_module_transformers', ({(171, 67, 171, 73): 'MODULE'}, {}), '(MODULE)', False, 'from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers\n'), ((216, 43, 216, 74), 'stix_shifter_utils.stix_translation.src.utils.transformer_utils.get_module_transformers', 'get_module_transformers', ({(216, 67, 216, 73): 'MODULE'}, {}), '(MODULE)', False, 'from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers\n'), ((323, 43, 323, 74), 'stix_shifter_utils.stix_translation.src.utils.transformer_utils.get_module_transformers', 'get_module_transformers', ({(323, 67, 323, 73): 'MODULE'}, {}), '(MODULE)', False, 'from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers\n'), ((433, 43, 433, 74), 'stix_shifter_utils.stix_translation.src.utils.transformer_utils.get_module_transformers', 'get_module_transformers', ({(433, 67, 433, 73): 'MODULE'}, {}), '(MODULE)', False, 'from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers\n')] |
CodingGorit/Coding-with-Python | Python Spider/xpath/03 login.py | b0f1d5d704b816a85b0ae57b46d00314de2a67b9 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#file: 03 login.py
#@author: Gorit
#@contact: [email protected]
#@time: 2020/1/20 12:44
import requests
from lxml import etree
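# requests keeps the session cookies across calls; lxml.etree supplies the XPath engine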
# Wrapper class that logs in to lmonkey.com and fetches the user's orders
class lMonKey:
    # login request URL
    loginUrl = "https://www.lmonkey.com/login"
    # account-center (orders) URL
    orderUrl = "https://www.lmonkey.com/my/order"
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400"
}
    # requests session object
    req = None
    # CSRF token value (_token)
    token = ''
    # order number
    # initializer
    def __init__(self):
        # initialize the requests session
self.req = requests.session()
        if self.getlogin(): # GET of the login page succeeded
            if self.postlogin(): # POST login succeeded
                self.getorder()
    # GET the login page and extract the _token
    def getlogin(self):
        # 1. GET the login page to set the cookies and obtain the _token
res = self.req.get(url=self.loginUrl,headers=self.headers)
if res.status_code == 200:
print("get 页面请求成功")
html = etree.HTML(res.text)
self.token = html.xpath("//input[@name='_token']/@value")[0]
            # find the <input> element whose name attribute is "_token" and take its value attribute, i.e. the token
            # input[@name='xxx'] locates the element with the given name attribute
            print("token obtained")
return True
else:
print("请求错误")
# post 登录,设置 cookie
def postlogin(self):
uname = input("输入你的手机号:")
passw = input("请输入你的密码:")
data = {
"_token": self.token,
"username": uname,
"password": passw
}
        # send the POST request
res = self.req.post(url=self.loginUrl,headers=self.headers,data=data)
if res.status_code==200 or res.status_code==302:
print("登录成功!!")
return True
    def getorder(self):
        # GET the orders page to pick up the default order info
        # and parse the returned data
res = self.req.get(url=self.orderUrl,headers=self.headers)
if res.status_code == 200:
print("请求订单页页面成功")
html = etree.HTML(res.text)
            # parse the page
r = html.xpath("//div[@class='avatar-content']/small/text()")
print(r)
else:
print("頁面請求失敗")
obj = lMonKey()
| [((31, 19, 31, 37), 'requests.session', 'requests.session', ({}, {}), '()', False, 'import requests\n'), ((43, 19, 43, 39), 'lxml.etree.HTML', 'etree.HTML', ({(43, 30, 43, 38): 'res.text'}, {}), '(res.text)', False, 'from lxml import etree\n'), ((74, 19, 74, 39), 'lxml.etree.HTML', 'etree.HTML', ({(74, 30, 74, 38): 'res.text'}, {}), '(res.text)', False, 'from lxml import etree\n')] |
fireclawthefox/AnkandoraLight | src/gui/MultiplayerPlayerInfo.py | 05b71e1a2919141cce02cb1aade95fbac682614b | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file was created using the DirectGUI Designer
from direct.gui import DirectGuiGlobals as DGG
from direct.gui.DirectFrame import DirectFrame
from direct.gui.DirectLabel import DirectLabel
from direct.gui.DirectButton import DirectButton
from direct.gui.DirectOptionMenu import DirectOptionMenu
from panda3d.core import (
LPoint3f,
LVecBase3f,
LVecBase4f,
TextNode
)
class GUI:
def __init__(self, rootParent=None):
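        # Designer-generated layout: widgets use absolute positions; with rootParent=None, DirectGUI falls back to its default parent (aspect2d)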
self.frmMain = DirectFrame(
frameColor=(1, 1, 1, 1),
frameSize=(-1.777778, 1.77777778, -1.1638, 1.1638),
hpr=LVecBase3f(0, 0, 0),
image='assets/menu/Background.png',
pos=LPoint3f(0, 0, 0),
image_scale=LVecBase3f(1.77778, 1, 1.1638),
image_pos=LPoint3f(0, 0, 0),
parent=rootParent,
)
self.frmMain.setTransparency(0)
self.frmSinglePlayerCreateGame = DirectFrame(
borderWidth=(0.01, 0.01),
frameColor=(1, 1, 1, 1),
frameSize=(-0.65, 0.65, -0.55, 0.55),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.425, 0, 0),
relief=5,
parent=self.frmMain,
)
self.frmSinglePlayerCreateGame.setTransparency(0)
self.pg703 = DirectLabel(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0, 0, 0.425),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Player Info',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.pg703.setTransparency(0)
self.pg13803 = DirectButton(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.35, 0, -0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Start',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
command=base.messenger.send,
extraArgs=["multiplayerPlayerInfo_start"],
)
self.pg13803.setTransparency(0)
self.pg5219 = DirectLabel(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.6, 0, 0.02),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Player Class',
text_align=TextNode.A_left,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.pg5219.setTransparency(0)
self.optionPlayerClass = DirectOptionMenu(
items=['item1'],
frameSize=(0.07500000298023224, 3.012500149011612, -0.11250001192092896, 0.75),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.2, 0, 0.005),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='item1',
cancelframe_frameSize=(-1, 1, -1, 1),
cancelframe_hpr=LVecBase3f(0, 0, 0),
cancelframe_pos=LPoint3f(0, 0, 0),
cancelframe_relief=None,
item_frameSize=(0.07500000298023224, 2.4125001430511475, -0.11250001192092896, 0.75),
item_hpr=LVecBase3f(0, 0, 0),
item_pos=LPoint3f(-0.075, 0, -0.75),
item_text='item1',
item0_text_align=TextNode.A_left,
item0_text_scale=(1, 1),
item0_text_pos=(0, 0),
item0_text_fg=LVecBase4f(0, 0, 0, 1),
item0_text_bg=LVecBase4f(0, 0, 0, 0),
item0_text_wordwrap=None,
popupMarker_frameSize=(-0.5, 0.5, -0.2, 0.2),
popupMarker_hpr=LVecBase3f(0, 0, 0),
popupMarker_pos=LPoint3f(2.7125, 0, 0.31875),
popupMarker_relief=2,
popupMarker_scale=LVecBase3f(0.4, 0.4, 0.4),
popupMenu_frameSize=(0, 2.3375001400709152, -0.862500011920929, 0),
popupMenu_hpr=LVecBase3f(0, 0, 0),
popupMenu_pos=LPoint3f(0, 0, 0),
popupMenu_relief='raised',
text_align=TextNode.A_left,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.optionPlayerClass.setTransparency(0)
self.btnCancel = DirectButton(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.325, 0, -0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Cancel',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
command=base.messenger.send,
extraArgs=["multiplayerPlayerInfo_cancel"],
)
self.btnCancel.setTransparency(0)
self.frmPlayerInfo = DirectFrame(
borderWidth=(0.01, 0.01),
frameColor=(1, 1, 1, 1),
frameSize=(-0.5, 0.5, -0.55, 0.55),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.765, 0, 0),
relief=3,
parent=self.frmMain,
)
self.frmPlayerInfo.setTransparency(0)
self.lblInfoHeader = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0, 0, 0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Info',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblInfoHeader.setTransparency(0)
self.frmImageHero = DirectFrame(
frameColor=(1, 1, 1, 1),
frameSize=(-0.15, 0.15, -0.2, 0.2),
hpr=LVecBase3f(0, 0, 0),
image='/home/fireclaw/workspace/Ankandora/AnkandoraLight/design/guiGraphics/heroArcher.png',
pos=LPoint3f(-0.275, 0, 0.195),
image_scale=LVecBase3f(0.15, 1, 0.2),
image_pos=LPoint3f(0, 0, 0),
parent=self.frmPlayerInfo,
)
self.frmImageHero.setTransparency(1)
self.lblClassDescription = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.12, 0, 0.31),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='The archer shoots from afar and gains the first-strike',
text_align=TextNode.A_left,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=10.0,
parent=self.frmPlayerInfo,
)
self.lblClassDescription.setTransparency(0)
self.lblHealth = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.28, 0, -0.1),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Health',
text_align=TextNode.A_center,
text_scale=(0.7, 0.7),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblHealth.setTransparency(0)
self.lblAttack = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.285),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Attack',
text_align=TextNode.A_center,
text_scale=(0.7, 0.7),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblAttack.setTransparency(0)
self.lblHealthValue = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.17),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='7',
text_align=TextNode.A_center,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblHealthValue.setTransparency(0)
self.lblAttackValue = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.36),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='4',
text_align=TextNode.A_center,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblAttackValue.setTransparency(0)
def show(self):
self.frmMain.show()
def hide(self):
self.frmMain.hide()
def destroy(self):
self.frmMain.destroy()
| [((25, 16, 25, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(25, 27, 25, 28): '0', (25, 30, 25, 31): '0', (25, 33, 25, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((27, 16, 27, 33), 'panda3d.core.LPoint3f', 'LPoint3f', ({(27, 25, 27, 26): '0', (27, 28, 27, 29): '0', (27, 31, 27, 32): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((28, 24, 28, 54), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(28, 35, 28, 42): '1.77778', (28, 44, 28, 45): '1', (28, 47, 28, 53): '1.1638'}, {}), '(1.77778, 1, 1.1638)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((29, 22, 29, 39), 'panda3d.core.LPoint3f', 'LPoint3f', ({(29, 31, 29, 32): '0', (29, 34, 29, 35): '0', (29, 37, 29, 38): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((38, 16, 38, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(38, 27, 38, 28): '0', (38, 30, 38, 31): '0', (38, 33, 38, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((39, 16, 39, 38), 'panda3d.core.LPoint3f', 'LPoint3f', ({(39, 25, 39, 31): '-0.425', (39, 33, 39, 34): '0', (39, 36, 39, 37): '0'}, {}), '(-0.425, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((46, 16, 46, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(46, 27, 46, 28): '0', (46, 30, 46, 31): '0', (46, 33, 46, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((47, 16, 47, 37), 'panda3d.core.LPoint3f', 'LPoint3f', ({(47, 25, 47, 26): '0', (47, 28, 47, 29): '0', (47, 31, 47, 36): '0.425'}, {}), '(0, 0, 0.425)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((48, 18, 48, 43), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(48, 29, 48, 32): '0.1', (48, 34, 48, 37): '0.1', (48, 39, 48, 42): '0.1'}, {}), '(0.1, 0.1, 0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((53, 20, 53, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(53, 31, 53, 32): '0', (53, 34, 53, 35): '0', (53, 37, 53, 38): '0', (53, 40, 53, 41): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((54, 20, 54, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(54, 31, 54, 32): '0', (54, 34, 54, 35): '0', (54, 37, 54, 38): '0', (54, 40, 54, 41): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((61, 16, 61, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(61, 27, 61, 28): '0', (61, 30, 61, 31): '0', (61, 33, 61, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((62, 16, 62, 41), 'panda3d.core.LPoint3f', 'LPoint3f', ({(62, 25, 62, 30): '-0.35', (62, 32, 62, 33): '0', (62, 35, 62, 40): '-0.45'}, {}), '(-0.35, 0, -0.45)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((63, 18, 63, 43), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(63, 29, 63, 32): '0.1', (63, 34, 63, 37): '0.1', (63, 39, 63, 42): '0.1'}, {}), '(0.1, 0.1, 0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((68, 20, 68, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(68, 31, 68, 32): '0', (68, 34, 68, 35): '0', (68, 37, 68, 38): '0', (68, 40, 68, 41): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core 
import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((69, 20, 69, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(69, 31, 69, 32): '0', (69, 34, 69, 35): '0', (69, 37, 69, 38): '0', (69, 40, 69, 41): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((78, 16, 78, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(78, 27, 78, 28): '0', (78, 30, 78, 31): '0', (78, 33, 78, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((79, 16, 79, 39), 'panda3d.core.LPoint3f', 'LPoint3f', ({(79, 25, 79, 29): '-0.6', (79, 31, 79, 32): '0', (79, 34, 79, 38): '0.02'}, {}), '(-0.6, 0, 0.02)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((80, 18, 80, 43), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(80, 29, 80, 32): '0.1', (80, 34, 80, 37): '0.1', (80, 39, 80, 42): '0.1'}, {}), '(0.1, 0.1, 0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((85, 20, 85, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(85, 31, 85, 32): '0', (85, 34, 85, 35): '0', (85, 37, 85, 38): '0', (85, 40, 85, 41): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((86, 20, 86, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(86, 31, 86, 32): '0', (86, 34, 86, 35): '0', (86, 37, 86, 38): '0', (86, 40, 86, 41): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((95, 16, 95, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(95, 27, 95, 28): '0', (95, 30, 95, 31): '0', (95, 33, 95, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((96, 16, 96, 39), 'panda3d.core.LPoint3f', 'LPoint3f', ({(96, 25, 96, 28): '0.2', (96, 30, 96, 31): '0', (96, 33, 96, 38): '0.005'}, {}), '(0.2, 0, 0.005)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((97, 18, 97, 43), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(97, 29, 97, 32): '0.1', (97, 34, 97, 37): '0.1', (97, 39, 97, 42): '0.1'}, {}), '(0.1, 0.1, 0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((100, 28, 100, 47), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(100, 39, 100, 40): '0', (100, 42, 100, 43): '0', (100, 45, 100, 46): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((101, 28, 101, 45), 'panda3d.core.LPoint3f', 'LPoint3f', ({(101, 37, 101, 38): '0', (101, 40, 101, 41): '0', (101, 43, 101, 44): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((104, 21, 104, 40), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(104, 32, 104, 33): '0', (104, 35, 104, 36): '0', (104, 38, 104, 39): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((105, 21, 105, 47), 'panda3d.core.LPoint3f', 'LPoint3f', ({(105, 30, 105, 36): '-0.075', (105, 38, 105, 39): '0', (105, 41, 105, 46): '-0.75'}, {}), '(-0.075, 0, -0.75)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((110, 26, 110, 48), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(110, 37, 110, 38): '0', (110, 40, 110, 41): '0', (110, 43, 110, 44): '0', (110, 46, 110, 47): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((111, 26, 111, 48), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(111, 
37, 111, 38): '0', (111, 40, 111, 41): '0', (111, 43, 111, 44): '0', (111, 46, 111, 47): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((114, 28, 114, 47), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(114, 39, 114, 40): '0', (114, 42, 114, 43): '0', (114, 45, 114, 46): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((115, 28, 115, 56), 'panda3d.core.LPoint3f', 'LPoint3f', ({(115, 37, 115, 43): '2.7125', (115, 45, 115, 46): '0', (115, 48, 115, 55): '0.31875'}, {}), '(2.7125, 0, 0.31875)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((117, 30, 117, 55), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(117, 41, 117, 44): '0.4', (117, 46, 117, 49): '0.4', (117, 51, 117, 54): '0.4'}, {}), '(0.4, 0.4, 0.4)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((119, 26, 119, 45), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(119, 37, 119, 38): '0', (119, 40, 119, 41): '0', (119, 43, 119, 44): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((120, 26, 120, 43), 'panda3d.core.LPoint3f', 'LPoint3f', ({(120, 35, 120, 36): '0', (120, 38, 120, 39): '0', (120, 41, 120, 42): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((125, 20, 125, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(125, 31, 125, 32): '0', (125, 34, 125, 35): '0', (125, 37, 125, 38): '0', (125, 40, 125, 41): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((126, 20, 126, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(126, 31, 126, 32): '0', (126, 34, 126, 35): '0', (126, 37, 126, 38): '0', (126, 40, 126, 41): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((133, 16, 133, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(133, 27, 133, 28): '0', (133, 30, 133, 31): '0', (133, 33, 133, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((134, 16, 134, 41), 'panda3d.core.LPoint3f', 'LPoint3f', ({(134, 25, 134, 30): '0.325', (134, 32, 134, 33): '0', (134, 35, 134, 40): '-0.45'}, {}), '(0.325, 0, -0.45)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((135, 18, 135, 43), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(135, 29, 135, 32): '0.1', (135, 34, 135, 37): '0.1', (135, 39, 135, 42): '0.1'}, {}), '(0.1, 0.1, 0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((140, 20, 140, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(140, 31, 140, 32): '0', (140, 34, 140, 35): '0', (140, 37, 140, 38): '0', (140, 40, 140, 41): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((141, 20, 141, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(141, 31, 141, 32): '0', (141, 34, 141, 35): '0', (141, 37, 141, 38): '0', (141, 40, 141, 41): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((153, 16, 153, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(153, 27, 153, 28): '0', (153, 30, 153, 31): '0', (153, 33, 153, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((154, 16, 154, 37), 'panda3d.core.LPoint3f', 'LPoint3f', ({(154, 25, 154, 30): '0.765', 
(154, 32, 154, 33): '0', (154, 35, 154, 36): '0'}, {}), '(0.765, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((162, 16, 162, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(162, 27, 162, 28): '0', (162, 30, 162, 31): '0', (162, 33, 162, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((163, 16, 163, 36), 'panda3d.core.LPoint3f', 'LPoint3f', ({(163, 25, 163, 26): '0', (163, 28, 163, 29): '0', (163, 31, 163, 35): '0.45'}, {}), '(0, 0, 0.45)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((164, 18, 164, 43), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(164, 29, 164, 32): '0.1', (164, 34, 164, 37): '0.1', (164, 39, 164, 42): '0.1'}, {}), '(0.1, 0.1, 0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((169, 20, 169, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(169, 31, 169, 32): '0', (169, 34, 169, 35): '0', (169, 37, 169, 38): '0', (169, 40, 169, 41): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((170, 20, 170, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(170, 31, 170, 32): '0', (170, 34, 170, 35): '0', (170, 37, 170, 38): '0', (170, 40, 170, 41): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((179, 16, 179, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(179, 27, 179, 28): '0', (179, 30, 179, 31): '0', (179, 33, 179, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((181, 16, 181, 42), 'panda3d.core.LPoint3f', 'LPoint3f', ({(181, 25, 181, 31): '-0.275', (181, 33, 181, 34): '0', (181, 36, 181, 41): '0.195'}, {}), '(-0.275, 0, 0.195)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((182, 24, 182, 48), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(182, 35, 182, 39): '0.15', (182, 41, 182, 42): '1', (182, 44, 182, 47): '0.2'}, {}), '(0.15, 1, 0.2)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((183, 22, 183, 39), 'panda3d.core.LPoint3f', 'LPoint3f', ({(183, 31, 183, 32): '0', (183, 34, 183, 35): '0', (183, 37, 183, 38): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((190, 16, 190, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(190, 27, 190, 28): '0', (190, 30, 190, 31): '0', (190, 33, 190, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((191, 16, 191, 40), 'panda3d.core.LPoint3f', 'LPoint3f', ({(191, 25, 191, 30): '-0.12', (191, 32, 191, 33): '0', (191, 35, 191, 39): '0.31'}, {}), '(-0.12, 0, 0.31)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((192, 18, 192, 43), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(192, 29, 192, 32): '0.1', (192, 34, 192, 37): '0.1', (192, 39, 192, 42): '0.1'}, {}), '(0.1, 0.1, 0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((197, 20, 197, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(197, 31, 197, 32): '0', (197, 34, 197, 35): '0', (197, 37, 197, 38): '0', (197, 40, 197, 41): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((198, 20, 198, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(198, 31, 198, 32): '0', (198, 34, 198, 35): '0', (198, 37, 198, 38): '0', (198, 40, 198, 
41): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((206, 16, 206, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(206, 27, 206, 28): '0', (206, 30, 206, 31): '0', (206, 33, 206, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((207, 16, 207, 40), 'panda3d.core.LPoint3f', 'LPoint3f', ({(207, 25, 207, 30): '-0.28', (207, 32, 207, 33): '0', (207, 35, 207, 39): '-0.1'}, {}), '(-0.28, 0, -0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((208, 18, 208, 43), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(208, 29, 208, 32): '0.1', (208, 34, 208, 37): '0.1', (208, 39, 208, 42): '0.1'}, {}), '(0.1, 0.1, 0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((213, 20, 213, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(213, 31, 213, 32): '0', (213, 34, 213, 35): '0', (213, 37, 213, 38): '0', (213, 40, 213, 41): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((214, 20, 214, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(214, 31, 214, 32): '0', (214, 34, 214, 35): '0', (214, 37, 214, 38): '0', (214, 40, 214, 41): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((222, 16, 222, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(222, 27, 222, 28): '0', (222, 30, 222, 31): '0', (222, 33, 222, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((223, 16, 223, 43), 'panda3d.core.LPoint3f', 'LPoint3f', ({(223, 25, 223, 31): '-0.275', (223, 33, 223, 34): '0', (223, 36, 223, 42): '-0.285'}, {}), '(-0.275, 0, -0.285)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((224, 18, 224, 43), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(224, 29, 224, 32): '0.1', (224, 34, 224, 37): '0.1', (224, 39, 224, 42): '0.1'}, {}), '(0.1, 0.1, 0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((229, 20, 229, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(229, 31, 229, 32): '0', (229, 34, 229, 35): '0', (229, 37, 229, 38): '0', (229, 40, 229, 41): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((230, 20, 230, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(230, 31, 230, 32): '0', (230, 34, 230, 35): '0', (230, 37, 230, 38): '0', (230, 40, 230, 41): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((238, 16, 238, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(238, 27, 238, 28): '0', (238, 30, 238, 31): '0', (238, 33, 238, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((239, 16, 239, 42), 'panda3d.core.LPoint3f', 'LPoint3f', ({(239, 25, 239, 31): '-0.275', (239, 33, 239, 34): '0', (239, 36, 239, 41): '-0.17'}, {}), '(-0.275, 0, -0.17)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((240, 18, 240, 43), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(240, 29, 240, 32): '0.1', (240, 34, 240, 37): '0.1', (240, 39, 240, 42): '0.1'}, {}), '(0.1, 0.1, 0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((245, 20, 245, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(245, 31, 245, 32): '0', (245, 34, 245, 35): '0', (245, 37, 245, 38): '0', (245, 
40, 245, 41): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((246, 20, 246, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(246, 31, 246, 32): '0', (246, 34, 246, 35): '0', (246, 37, 246, 38): '0', (246, 40, 246, 41): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((254, 16, 254, 35), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(254, 27, 254, 28): '0', (254, 30, 254, 31): '0', (254, 33, 254, 34): '0'}, {}), '(0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((255, 16, 255, 42), 'panda3d.core.LPoint3f', 'LPoint3f', ({(255, 25, 255, 31): '-0.275', (255, 33, 255, 34): '0', (255, 36, 255, 41): '-0.36'}, {}), '(-0.275, 0, -0.36)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((256, 18, 256, 43), 'panda3d.core.LVecBase3f', 'LVecBase3f', ({(256, 29, 256, 32): '0.1', (256, 34, 256, 37): '0.1', (256, 39, 256, 42): '0.1'}, {}), '(0.1, 0.1, 0.1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((261, 20, 261, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(261, 31, 261, 32): '0', (261, 34, 261, 35): '0', (261, 37, 261, 38): '0', (261, 40, 261, 41): '1'}, {}), '(0, 0, 0, 1)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n'), ((262, 20, 262, 42), 'panda3d.core.LVecBase4f', 'LVecBase4f', ({(262, 31, 262, 32): '0', (262, 34, 262, 35): '0', (262, 37, 262, 38): '0', (262, 40, 262, 41): '0'}, {}), '(0, 0, 0, 0)', False, 'from panda3d.core import LPoint3f, LVecBase3f, LVecBase4f, TextNode\n')] |
mkoo21/rss-review-scraper | publications/time_mag.py | 4adde8586ce55d7bb211bcfbb9bcccd1edc8b6a5 | from . import FROM_FEED_PUBLISHED_TODAY, STRINGIFY
def filter_by_tag(tag, entries):
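    # Keep only the entries whose tags include `tag` (entries are assumed to be
    # feedparser-style objects carrying .tags with a .term attribute), then
    # render the matches as an HTML snippet via STRINGIFY.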
matches = list(filter(
lambda x: any(list(map(
lambda y: y.term == tag,
x.tags
))),
entries
))
if len(matches) == 0:
return ""
return "<h2>TIME {} - {} results</h2>".format(tag, len(matches)) + \
"".join(list(map(lambda x: STRINGIFY(x, 'TIME'), matches)))
def TIME():
pub_today = FROM_FEED_PUBLISHED_TODAY('https://feeds2.feedburner.com/time/entertainment')
return filter_by_tag('movies', pub_today) + \
filter_by_tag('Television', pub_today)
| [] |
irobin591/advent-of-code-2019 | 2020/21/code.py | 279c28a2863558bd014b289802fff4b444c5d6cf | # Advent of Code 2020
# Day 21
# Author: irobin591
import os
import doctest
import re
re_entry = re.compile(r'^([a-z ]+) \(contains ([a-z, ]*)\)$')
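# Example line this regex matches (the sample input format from the puzzle):
#   mxmxvkd kfcds sqjhc nhms (contains dairy, fish)
# group(1) -> "mxmxvkd kfcds sqjhc nhms", group(2) -> "dairy, fish"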
with open(os.path.join(os.path.dirname(__file__), "input.txt"), 'r') as input_file:
input_data = input_file.read().strip().split('\n')
def part1(input_data):
"""
>>> part1(open(os.path.join(os.path.dirname(__file__), "test_part1.txt"), 'r').read().strip().split('\\n'))
5
"""
    # allergens maps each allergen name to the candidate ingredients that may contain it,
    # e.g. allergens['dairy'] = {'mxmxvkd', 'kfcds', ...}
allergens = {}
ingredients = []
# map strings to allergens
for entry in input_data:
r = re_entry.match(entry)
if not r:
            raise RuntimeError("Input line does not match the expected format: " + entry)
contents = set(r.group(1).split(' '))
ingredients.extend(contents)
for allergen in r.group(2).split(', '):
if allergen not in allergens:
allergens[allergen] = contents
else:
# only keep already added ingredients
allergens[allergen] = [ingredient for ingredient in contents if ingredient in allergens[allergen]]
# print(allergens)
# print(ingredients)
ingredients_with_allergens = set([y for x in allergens.values() for y in x])
# print(list(filter(lambda i: i not in ingredients_with_allergens, ingredients)))
return len(list(filter(lambda i: i not in ingredients_with_allergens, ingredients)))
def part2(input_data):
"""
>>> part2(open(os.path.join(os.path.dirname(__file__), "test_part1.txt"), 'r').read().strip().split('\\n'))
'mxmxvkd,sqjhc,fvjkl'
"""
    # allergens maps each allergen name to the candidate ingredients that may contain it
allergens = {}
ingredients = []
# map strings to allergens
for entry in input_data:
r = re_entry.match(entry)
if not r:
            raise RuntimeError("Input line does not match the expected format: " + entry)
contents = set(r.group(1).split(' '))
ingredients.extend(contents)
for allergen in r.group(2).split(', '):
if allergen not in allergens:
allergens[allergen] = list(contents)
else:
# only keep already added ingredients
allergens[allergen] = [ingredient for ingredient in contents if ingredient in allergens[allergen]]
# print(allergens)
# (allergen, ingredient)
assigned_allergens = []
while sum([len(ingreds) for ingreds in allergens.values()]) > 0:
for allergen in allergens:
if len(allergens[allergen]) == 1:
ingredient = allergens[allergen][0]
assigned_allergens.append((allergen, ingredient))
for allergen2 in allergens:
if ingredient in allergens[allergen2]:
allergens[allergen2].remove(ingredient)
assigned_allergens.sort(key=lambda x: x[0])
return ",".join([x[1] for x in assigned_allergens])
if __name__ == "__main__":
doctest.testmod()
print("Part One: {}".format(part1(input_data)))
print("Part Two: {}".format(part2(input_data)))
pass | [((9, 11, 9, 61), 're.compile', 're.compile', ({(9, 22, 9, 60): '"""^([a-z ]+) \\\\(contains ([a-z, ]*)\\\\)$"""'}, {}), "('^([a-z ]+) \\\\(contains ([a-z, ]*)\\\\)$')", False, 'import re\n'), ((95, 4, 95, 21), 'doctest.testmod', 'doctest.testmod', ({}, {}), '()', False, 'import doctest\n'), ((11, 23, 11, 48), 'os.path.dirname', 'os.path.dirname', ({(11, 39, 11, 47): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
copart/pandoc-mustache | tests/test_html_escaping.py | f6ace29cd0c8d6b4d8f182eedcf36ad38a2412fa | """
Test that escaping characters for HTML is disabled.
"""
import os, subprocess
def test_escape_singlequote(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world ' universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world ' universe\n"
def test_escape_gt(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world > universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world > universe\n"
def test_escape_ampersand(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world & universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world & universe\n"
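# Design note: the three tests above differ only in the template payload. The
# same coverage can be written once with parametrization; a minimal sketch,
# assuming pytest is the runner (pytest is not imported by the original tests,
# which only rely on its tmpdir fixture):
import pytest
@pytest.mark.parametrize("payload", [
    "world ' universe",
    "world > universe",
    "world & universe",
])
def test_escape_character_parametrized(tmpdir, payload):
    doc_path = tmpdir.join("document.md")
    template_path = tmpdir.join("template.yaml")
    # Document metadata points pandoc-mustache at the template file
    doc_path.write("---\nmustache: " + template_path.strpath + "\n---\nHello {{place}}")
    template_path.write("place: " + payload)
    # Run pandoc with the filter and check the payload comes through unescaped
    output = subprocess.check_output(
        ["pandoc", doc_path.strpath, "--filter", "pandoc-mustache", "--to=plain"],
        universal_newlines=True)
    assert output == "Hello " + payload + "\n"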
| [((32, 13, 32, 139), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import os, subprocess\n'), ((63, 13, 63, 139), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import os, subprocess\n'), ((94, 13, 94, 139), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import os, subprocess\n')] |
iandees/microdata2osm | app.py | 1505b8072880055033ddbb85626fcdb857c97d4e | from flask import Flask, jsonify, request
from w3lib.html import get_base_url
import extruct
import requests
app = Flask(__name__)
def extract_osm_tags(data):
tags = {}
schema_org_type = data.get('@type')
if schema_org_type == 'Restaurant':
tags['amenity'] = 'restaurant'
        # servesCuisine comes from the schema.org data dict, not the tags we are building
        serves_cuisine = data.get('servesCuisine')
        if serves_cuisine:
            cuisine = []
            if 'Burgers' in serves_cuisine:
                cuisine.append('burger')
            if 'Fast Casual' in serves_cuisine:
                tags['amenity'] = 'fast_food'
            if cuisine:
                # OSM convention: multiple cuisine values are semicolon-separated
                tags['cuisine'] = ';'.join(cuisine)
elif schema_org_type == 'Hotel':
tags['tourism'] = 'hotel'
elif schema_org_type == 'ExerciseGym':
tags['leisure'] = 'fitness_centre'
elif schema_org_type == 'BankOrCreditUnion':
tags['amenity'] = 'bank'
else:
return {}
address = data.get('address', {}).get('streetAddress')
if address:
tags['addr:full'] = address
address = data.get('address', {}).get('addressLocality')
if address:
tags['addr:city'] = address
address = data.get('address', {}).get('addressRegion')
if address:
tags['addr:state'] = address
address = data.get('address', {}).get('postalCode')
if address:
        tags['addr:postcode'] = address
address = data.get('address', {}).get('addressCountry')
if address:
tags['addr:country'] = address
brand = data.get('brand')
if brand:
tags['brand'] = brand
name = data.get('name')
if name:
tags['name'] = name
telephone = data.get('telephone')
if telephone:
tags['phone'] = telephone
faxNumber = data.get('faxNumber')
if faxNumber:
tags['fax'] = faxNumber
url = data.get('url')
if url:
tags['website'] = url
return tags
@app.route("/extract")
def extract():
url = request.args.get('url')
if not url:
return jsonify(error="Must specify url parameter"), 400
app.logger.info("Extracting json-ld from %s", url)
r = requests.get(url)
if r.status_code != 200:
app.logger.info("HTTP %s from %s", r.status_code, url)
return jsonify(error="Error fetching url"), 502
base_url = get_base_url(r.text, r.url)
data = extruct.extract(r.text, base_url=base_url, syntaxes=["json-ld"])
data = data.get('json-ld')
output = {}
suggested_tags = {}
for entry in data:
suggested_tags.update(extract_osm_tags(entry))
output = {
'status': {
'url': url,
'success': len(suggested_tags) > 0,
},
'suggested_tags': suggested_tags,
}
if request.args.get('include_extracted', type=bool):
output['extracted'] = data
return jsonify(output)
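# Example request (hypothetical host/port, e.g. when run via `flask run`):
#   curl 'http://localhost:5000/extract?url=https://example.com/some-place'
# which returns JSON shaped like:
#   {"status": {"url": "...", "success": true}, "suggested_tags": {"amenity": "restaurant", ...}}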
| [((6, 6, 6, 21), 'flask.Flask', 'Flask', ({(6, 12, 6, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, jsonify, request\n'), ((73, 10, 73, 33), 'flask.request.args.get', 'request.args.get', ({(73, 27, 73, 32): '"""url"""'}, {}), "('url')", False, 'from flask import Flask, jsonify, request\n'), ((80, 8, 80, 25), 'requests.get', 'requests.get', ({(80, 21, 80, 24): 'url'}, {}), '(url)', False, 'import requests\n'), ((86, 15, 86, 42), 'w3lib.html.get_base_url', 'get_base_url', ({(86, 28, 86, 34): 'r.text', (86, 36, 86, 41): 'r.url'}, {}), '(r.text, r.url)', False, 'from w3lib.html import get_base_url\n'), ((87, 11, 87, 75), 'extruct.extract', 'extruct.extract', (), '', False, 'import extruct\n'), ((103, 7, 103, 55), 'flask.request.args.get', 'request.args.get', (), '', False, 'from flask import Flask, jsonify, request\n'), ((106, 11, 106, 26), 'flask.jsonify', 'jsonify', ({(106, 19, 106, 25): 'output'}, {}), '(output)', False, 'from flask import Flask, jsonify, request\n'), ((76, 15, 76, 58), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import Flask, jsonify, request\n'), ((84, 15, 84, 50), 'flask.jsonify', 'jsonify', (), '', False, 'from flask import Flask, jsonify, request\n')] |
davemasino/airflow101 | dags/simple_python_taskflow_api.py | f940e169b9c562e3834a201827b615744a99b86d | """
A simple Python DAG using the Taskflow API.
"""
import logging
import time
from datetime import datetime
from airflow import DAG
from airflow.decorators import task
log = logging.getLogger(__name__)
with DAG(
dag_id='simple_python_taskflow_api',
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['airflow101'],
) as dag:
@task(task_id="hello_message")
def say_hello():
"""Print a hello message"""
print("Hello, World!")
hello_task = say_hello()
@task(task_id="go_to_sleep")
def sleep_for_1():
"""Go to sleep"""
time.sleep(1)
sleeping_task = sleep_for_1()
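    # ">>" is Airflow's bitshift dependency operator: hello_task must finish
    # before sleeping_task starts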
hello_task >> sleeping_task
| [((11, 6, 11, 33), 'logging.getLogger', 'logging.getLogger', ({(11, 24, 11, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((20, 5, 20, 34), 'airflow.decorators.task', 'task', (), '', False, 'from airflow.decorators import task\n'), ((27, 5, 27, 32), 'airflow.decorators.task', 'task', (), '', False, 'from airflow.decorators import task\n'), ((30, 8, 30, 21), 'time.sleep', 'time.sleep', ({(30, 19, 30, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((16, 15, 16, 35), 'datetime.datetime', 'datetime', ({(16, 24, 16, 28): '(2021)', (16, 30, 16, 31): '(1)', (16, 33, 16, 34): '(1)'}, {}), '(2021, 1, 1)', False, 'from datetime import datetime\n')] |
kenkellner/pyunmarked | pyunmarked/roylenichols.py | 485bd96b4ca12a019b478fc19f68f577279ac9b8 | from . import model
import numpy as np
from scipy import special, stats
class RoyleNicholsModel(model.UnmarkedModel):
def __init__(self, det_formula, abun_formula, data):
self.response = model.Response(data.y)
abun = model.Submodel("Abundance", "abun", abun_formula, np.exp, data.site_covs)
det = model.Submodel("Detection", "det", det_formula, special.expit, data.obs_covs)
self.submodels = model.SubmodelDict(abun=abun, det=det)
def negloglik(self, x, mod, K):
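        # Royle-Nichols model: latent site abundance N_i ~ Poisson(lam_i), and the
        # per-visit detection probability is p_ij = 1 - (1 - r_ij)^N_i. The latent
        # N_i is marginalized out below by summing the Poisson-weighted binomial
        # likelihood over k = Kmin_i .. K.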
x = np.array(x)
beta_abun = x[mod["abun"].index]
beta_det = x[mod["det"].index]
y = mod.response.y
N, J = y.shape
lam = mod["abun"].predict(beta=beta_abun, interval=False)
r = mod["det"].predict(beta=beta_det, interval=False).reshape(N, J)
q = 1 - r
nll = 0.0
for i in range(N):
kvals = range(int(mod.response.Kmin[i]), int(K)+1)
f = stats.poisson.pmf(kvals, lam[i])
ymat = np.tile(y[i,], (len(kvals), 1))
qmat = np.tile(q[i,], (len(kvals), 1))
kmat = np.tile(kvals, (J, 1)).transpose()
pmat = 1 - qmat**kmat
g = stats.binom.logpmf(ymat, 1, pmat).sum(axis=1)
fg = f * np.exp(g)
nll -= np.log(fg.sum())
return nll
def simulate(self):
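        # Simulate one replicate dataset from the fitted model: draw latent
        # abundances z_i ~ Poisson(lam_i), then Bernoulli detections with
        # p_ij = 1 - q_ij**z_i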
N, J = self.response.y.shape
lam = self.predict("abun", interval=False)
q = 1 - self.predict("det", interval=False).reshape(N, J)
z = np.random.poisson(lam, N)
zrep = np.tile(z, (J,1)).transpose()
p = 1 - q**zrep
y = np.empty((N, J))
for i in range(N):
y[i,] = np.random.binomial(1, p[i,], J)
return y
| [((13, 12, 13, 23), 'numpy.array', 'np.array', ({(13, 21, 13, 22): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((39, 12, 39, 37), 'numpy.random.poisson', 'np.random.poisson', ({(39, 30, 39, 33): 'lam', (39, 35, 39, 36): 'N'}, {}), '(lam, N)', True, 'import numpy as np\n'), ((42, 12, 42, 28), 'numpy.empty', 'np.empty', ({(42, 21, 42, 27): '(N, J)'}, {}), '((N, J))', True, 'import numpy as np\n'), ((24, 16, 24, 48), 'scipy.stats.poisson.pmf', 'stats.poisson.pmf', ({(24, 34, 24, 39): 'kvals', (24, 41, 24, 47): 'lam[i]'}, {}), '(kvals, lam[i])', False, 'from scipy import special, stats\n'), ((44, 20, 44, 51), 'numpy.random.binomial', 'np.random.binomial', ({(44, 39, 44, 40): '1', (44, 42, 44, 47): 'p[i,]', (44, 49, 44, 50): 'J'}, {}), '(1, p[i,], J)', True, 'import numpy as np\n'), ((30, 21, 30, 30), 'numpy.exp', 'np.exp', ({(30, 28, 30, 29): 'g'}, {}), '(g)', True, 'import numpy as np\n'), ((40, 15, 40, 32), 'numpy.tile', 'np.tile', ({(40, 23, 40, 24): 'z', (40, 26, 40, 31): '(J, 1)'}, {}), '(z, (J, 1))', True, 'import numpy as np\n'), ((27, 19, 27, 41), 'numpy.tile', 'np.tile', ({(27, 27, 27, 32): 'kvals', (27, 34, 27, 40): '(J, 1)'}, {}), '(kvals, (J, 1))', True, 'import numpy as np\n'), ((29, 16, 29, 49), 'scipy.stats.binom.logpmf', 'stats.binom.logpmf', ({(29, 35, 29, 39): 'ymat', (29, 41, 29, 42): '1', (29, 44, 29, 48): 'pmat'}, {}), '(ymat, 1, pmat)', False, 'from scipy import special, stats\n')] |
pgriewank/ASR_tools | proc_chords_xarray.py | 306a7d92725888485a35f8824433ad7b0451b569 | #Contains the functions needed to process both chords and regularized beards
# proc_chords is used for chords
#proc_beard_regularize for generating beards
#proc_pdf saves pdfs of a variable below cloud base
#Both have a large overlap, but I split them in two to keep the one script from getting too confusing.
import numpy as np
import math
from netCDF4 import Dataset
import os
import time as ttiimmee
from scipy.interpolate import interp1d
from scipy.interpolate import interp2d
#from scipy.interpolate import griddata
#from mpl_toolkits.axes_grid1 import make_axes_locatable
import pickle
import sys
#sys.path.insert(0, "/home/pgriewank/code/2019-chords-plumes/")
#from unionfind import UnionFind
from cusize_functions import *
#import matplotlib.pyplot as plt
import pandas as pd
import gc
import glob
import xarray as xr
#turned into a function
#removed the possibility to loop over multiple dates, if you want to do that call the function repeatedly
#Full list of variables to analyze is unclear, I will try to include everything available, but this might break the memory bank
#want to keep the automatic x and y calculation
#Scaling shouldn't be needed, as all chord properties should be independent of wind direction (right?)
#Similarly, no base definition is needed, all values are relative to cloud base
#Should be able to work for any variable in the column output, or for any 3D variable as long as it is named the same as the file.
#Changing 3D output
#Default is now to always go over x and y directions
#TODO
#plot_flag disabled for the time being
def proc_chords( date_str='20160611',
directory_input='/data/testbed/lasso/sims/',
directory_output='/data/testbed/lasso/chords/',
data_dim_flag=1,
base_percentile = 25,
special_name='',
chord_times = 0,
N_it_min=0,
N_it_max=1e9):
    # plot_curtains_flag: 0 nothing, 1 plots pre regularization plots, currently disabled
    # data_dim_flag: 1 = column, 3 = 3D snapshot
    # chord_times: 0 use Neil's values, 1 use values that fit model output exactly with no gap possible
    # directory_input = '/data/testbed/lasso/sims/' #+date
    # N_it_max = maximum number of iterables, 3D timesteps or column files. Used for testing things quickly
    # N_it_min = start number of iterables, 3D timesteps or column files. Only really makes sense for 3D to avoid some weird initial fields.
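    # Minimal usage sketch (the arguments shown are just the defaults from above):
    #   proc_chords(date_str='20160611', data_dim_flag=1, chord_times=0,
    #               directory_input='/data/testbed/lasso/sims/',
    #               directory_output='/data/testbed/lasso/chords/')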
time_begin = ttiimmee.time()
dz = 25.0 #39.0625 #should be overwritten after the profile data is loaded
dx = 25.0
date = date_str
n_percentiles = 7 #Number of percentiles
percentiles = np.array([5,10,35,50,65,90,95])
#1D clustering parameters in seconds, taken to agree with Lareau
if chord_times == 0:
t_gap = 20
t_min = 30
t_max = 1200*100 #Made a 100 times longer
cell_min = 3 #Minimal number of cells needed per chord
# #1D clustering parameters,
#set super strict, but goes on for a loooong time as well
if chord_times == 1:
t_gap = 0. #should be pretty strict, no gaps allowed!
t_min = 0.0
t_max = 1e9
cell_min = 3 #Minimal number of cells needed per chord
ql_min = 1e-5 #value used to determine existence of cloud
    z_min = 10 #Index of minimum z lvl of the cbl
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
#uses glob to get all files which contain column.
column_files = glob.glob(directory_input+date+'/*column*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
filename_qt = directory_input+date+'/qt.nc'
filename_thl = directory_input+date+'/thl.nc'
file_w = Dataset(filename_w,read='r')
file_ql = Dataset(filename_l,read='r')
file_thl = Dataset(filename_thl,read='r')
file_qt = Dataset(filename_qt,read='r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
filename_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0]
#if date=='bomex':
# filename_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filename_prof,read='r')
n_chords = 0
#I will try lists first, which I will then convert to arrays in the end before saving in pandas
chord_timesteps = []
chord_length = []
chord_duration = []
chord_time = []
chord_height = [] #percentile of cloud base
chord_w = []
chord_w_up = [] #mean over updrafts
chord_w_base = []
chord_w_star = []
chord_thl_star = []
chord_qt_star = []
chord_thl = []
chord_thl_25 = []
chord_thl_75 = []
chord_qt = []
chord_qt_25 = []
chord_qt_75 = []
chord_w_flux = [] #Sum of w below
#Coming next
chord_w_per = np.zeros([0,n_percentiles])
chord_w_per_up = np.zeros([0,n_percentiles])
    #This is now a bit trickier than for the 3D version. Will have to calculate a vector for the lower time resolution of the profile,
    #then later apply the nearest value to the full 1d time vec
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
thl_prof = file_prof['thl'][:,:]
qt_prof = file_prof['qt'][:,:]
nz_prof = w2.shape[1]
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
print('dz: ',dz)
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
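    #Espy's approximation: LCL height [m] is roughly 125 * (T - T_dewpoint)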
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
        #Minimum of the LCL and the variance-derived height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
            print('warning, cbl height higher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
            cbl_1d_prof[tt] = math.floor(nz_prof*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
print('n_chords: ',n_chords)
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
file_col = Dataset(filename_column[it],read='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
print('t_1d',t_1d)
thl_2d = file_col.variables['thl'][:]
thl_2d = thl_2d.transpose()
qt_2d = file_col.variables['qt'][:]
qt_2d = qt_2d.transpose()
u_2d = file_col.variables['u'][:]
u_2d = u_2d.transpose()
v_2d = file_col.variables['v'][:]
v_2d = v_2d.transpose()
#lets try saving memory by closing files
#file_col.close()
#The needed cbl height
cbl_1d = t_1d*0
#The needed surface_bouyancy_flux
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d = t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
        #to get anomalies of thl and qt we subtract the closest mean profile
for tt in range(len(time_prof)):
#globals().update(locals())
tmp_matrix = thl_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = thl_prof[tt,:]
#because the vectors don't perfectly align
thl_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
tmp_matrix = qt_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = qt_prof[tt,:]
#because the vectors don't perfectly align
qt_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
qt_3d = grab_3d_field(file_qt ,it,'qt')
thl_3d = grab_3d_field(file_thl ,it,'thl')
                #Here we reshape the 3D fields into 2D slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
qt_2d = np.array(qt_3d.reshape((nz,nx*ny)))
thl_2d = np.array(thl_3d.reshape((nz,nx*ny)))
#Now we do the same thing with the transposed field, use to be an either or, now just add it on
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
qt_3d = np.transpose(qt_3d, (0, 2, 1))
thl_3d = np.transpose(thl_3d, (0, 2, 1))
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
thl_2d = np.hstack([thl_2d ,np.array(thl_3d.reshape((nz,nx*ny)))])
qt_2d = np.hstack([qt_2d ,np.array(qt_3d.reshape((nz,nx*ny)))])
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del thl_3d
del qt_3d
#hopefully this helps
gc.collect()
#Getting anomalies of thl and qt
qt_2d[:,:] = (qt_2d.transpose() - qt_prof[it,:]).transpose()
thl_2d[:,:] = (thl_2d.transpose() - thl_prof[it,:]).transpose()
#to get the fake time vector we load the wind from the profile data, which devided by the grid spacing gives us a fake time resolution
#we use the calculated cbl+300 meter or lcl as reference height
ref_lvl = cbl_1d_prof[it]
u_ref = file_prof['u'][it,ref_lvl]
v_ref = file_prof['v'][it,ref_lvl]
V_ref = np.sqrt(u_ref**2+v_ref**2)
time_resolution = dx/V_ref
print('time iterative, V_ref, time_resolution',it, str(V_ref)[:4], str(time_resolution)[:4] )
#fake t vector,
t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it
#dt_1d = t_1d*0
#dt_1d[1:] = t_1d[1:]-t_1d[:-1]
else:
#If no clouds are present we pass a very short empty fields over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
thl_2d = np.zeros((nz,1))
qt_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
#The needed cbl height, which constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
        ### Detecting whether the lowest cloud cell is within 300 m of the CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
        #Used to have a different method using nans that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
cl_base[t]=np.argmax(ql_2d[:,t]>1e-6)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
        #Now find cloud bases lower than the max CBL height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
t_cbl_cl=t_1d[cbl_cl_idx]
### Clustering 1D
#Now we simply go through all cloudy timesteps and detect chords
        #If they fulfill chord time requirements and have a number of values which fulfills cell_min they are counted as a chord
        #and their properties are calculated immediately
t_cloudy_idx = 0
#n_chords = 0
chord_idx_list = []
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
chord_idx_list = []
while t_cloudy_idx < len(cbl_cl_idx)-1:# and n_curtain<100*it: ####################################GO HERE TO SET MAXIMUM CURTAIN
#print(t_chord_begin)
t_chord_begin = t_cloudy_idx
#now connecting all cloudy indexes
#Originally only cared if they fulfilled cloud criteria, but now I also hard coded that neighboring cells always count
##Check if the index of the next cloudy cell is the same as the next index in total, if so the cells are connected
while t_cloudy_idx < len(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap):
t_cloudy_idx += 1
t_chord_end = t_cloudy_idx
            #Checking if it fulfils chord criteria regarding time
#we also added a minimum height of 100 m to screen out fog/dew stuff at the surface
if t_chord_end-t_chord_begin>cell_min:
chord_z_min = np.min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])
ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
else:
chord_z_min = 0
ch_duration = 0
if ch_duration>t_min and ch_duration<t_max and chord_z_min > 4:
if t_chord_end-t_chord_begin>cell_min-1:
n_chords += 1
#Getting the chord beginning and end
idx_beg_chord = cbl_cl_idx[t_chord_begin]
idx_end_chord = cbl_cl_idx[t_chord_end]
time_beg_chord = t_1d[idx_beg_chord]
time_end_chord = t_1d[idx_end_chord]
#chord_idx_list.append(list(cbl_cl_idx[t_chord_begin:t_chord_end]))
#list of relevant chord indexes
ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end])
#getting V_ref if data_dim_flag==1. Is calculated directly from the cloud base speeds
if data_dim_flag==1:
u_ref=np.mean(u_2d[cl_base[ch_idx_l],ch_idx_l])
v_ref=np.mean(v_2d[cl_base[ch_idx_l],ch_idx_l])
V_ref=np.sqrt(u_ref**2+v_ref**2)
### Now appending chord properties
chord_timesteps.append(t_chord_end-t_chord_begin)
chord_duration.append(ch_duration)
chord_length.append(ch_duration*V_ref)
tmp_base_height = np.percentile(cl_base[ch_idx_l],base_percentile)*dz
chord_height.append(tmp_base_height) #25th percentile of cloud base
surf_b_flux = np.mean(bflux_s_1d[idx_beg_chord:idx_end_chord])
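                    #Deardorff convective velocity scale: w* = (z_cloud_base * surface_buoyancy_flux)^(1/3)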
w_star = (tmp_base_height*surf_b_flux)**(1./3.)
surf_qt_flux = np.mean(qtflux_s_1d[idx_beg_chord:idx_end_chord])
qt_star = surf_qt_flux/w_star
surf_thl_flux = np.mean(thlflux_s_1d[idx_beg_chord:idx_end_chord])
thl_star = surf_thl_flux/w_star
chord_w_star.append(w_star )
chord_thl_star.append(thl_star )
chord_qt_star.append(qt_star )
chord_w_base.append(np.mean(w_2d[cl_base[ch_idx_l],ch_idx_l]))
chord_w.append(np.mean(w_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
chord_thl.append(np.mean(thl_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
#get a fourth and 3/4 of the cloud base
cl_base_25_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)/4.)
cl_base_75_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)*3./4.)
#print ('cl base idx:',np.percentile(cl_base[ch_idx_l],base_percentile),'clbase/4:',cl_base_25_idx[0],'clbase3/4:',cl_base_75_idx[0])
chord_thl_25.append(np.mean(thl_2d[cl_base_25_idx,ch_idx_l]))
chord_thl_75.append(np.mean(thl_2d[cl_base_75_idx,ch_idx_l]))
chord_qt.append(np.mean(qt_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
chord_qt_75.append(np.mean(qt_2d[cl_base_75_idx,ch_idx_l]))
chord_qt_25.append(np.mean(qt_2d[cl_base_25_idx,ch_idx_l]))
chord_w_flux.append(np.sum(w_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
w_base_vec = w_2d[cl_base[ch_idx_l]-1,ch_idx_l]
chord_w_up.append(np.mean(w_base_vec[w_base_vec>0.0]))
tmp_w_per = np.percentile(w_base_vec,percentiles)
if len(w_base_vec[w_base_vec>0.0])>0:
tmp_w_per_up = np.percentile(w_base_vec[w_base_vec>0.0],percentiles)
else:
tmp_w_per_up = np.zeros(n_percentiles)
tmp_w_per_up[:] = 'nan'
chord_w_per = np.vstack([chord_w_per,tmp_w_per])
chord_w_per_up = np.vstack([chord_w_per,tmp_w_per_up])
if data_dim_flag==1:
chord_time.append(np.mean(t_1d[ch_idx_l]))
if data_dim_flag==3:
chord_time.append(time_prof[it])
t_cloudy_idx += 1
time3 = ttiimmee.time()
print('iterable: ',it)
print('n_chords: ',n_chords)
print('number of time points included: ',len(cbl_cl_idx))
    #Converting the lists to arrays before saving them with pandas
chord_timesteps=np.asarray(chord_timesteps)
chord_duration =np.asarray(chord_duration)
chord_length =np.asarray(chord_length)
chord_height =np.asarray(chord_height)
chord_w_base =np.asarray(chord_w_base)
chord_w_star =np.asarray(chord_w_star)
chord_thl_star =np.asarray(chord_thl_star)
chord_qt_star =np.asarray(chord_qt_star)
chord_w =np.asarray(chord_w)
chord_w_up =np.asarray(chord_w_up)
chord_w_flux =np.asarray(chord_w_flux)
chord_thl =np.asarray(chord_thl)
chord_thl_25 =np.asarray(chord_thl_25)
chord_thl_75 =np.asarray(chord_thl_75)
chord_qt =np.asarray(chord_qt)
chord_qt_25 =np.asarray(chord_qt_25)
chord_qt_75 =np.asarray(chord_qt_75)
chord_time =np.asarray(chord_time)
#Saving
print('all chords: ',len(chord_duration))
save_string_base = 'chord_prop_'+date+'_d'+str(data_dim_flag)+'_ct'+str(chord_times)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
save_string_base = save_string_base+'_'+special_name+'_N'+str(n_chords)
filename_chord_panda = directory_output+save_string_base+'.pkl'
data_for_panda = list(zip(chord_timesteps,chord_duration,chord_length,chord_height,chord_w_base,chord_w,chord_w_flux,chord_time,chord_w_up,chord_w_per,chord_w_per_up,
chord_w_star,chord_thl_star,chord_qt_star,
chord_thl,chord_thl_25,chord_thl_75,chord_qt,chord_qt_25,chord_qt_75))
df = pd.DataFrame(data = data_for_panda, columns=['timesteps','duration','length','height','w_base','w','w_flux','time','w up','w per','w per up',
'w star','thl star','qt star',
'thl','thl 25','thl 75','qt','qt 25','qt 75'])
df.to_pickle(filename_chord_panda)
time_end = ttiimmee.time()
print('total run time of proc_chords in minutes: ',(time_end-time_begin)/60.)
print(':')
print(':')
print('chordlength properties saved as panda in ',filename_chord_panda)
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
return
#turned into a function
#removed the possibility to loop over multiple dates, if you want to do that call the function repeatedly
#Should be able to work for any variable in the column output, or for any 3D variable as long as it is named the same as the file.
#If the input data is a 3D field it will always go over x and y directions
#Two different scale_flags added to rotate the curtain to point upwind.
#TODO
#plot_flag disabled for the time being
def proc_beard_regularize(reg_var = 'w',
date_str='20160611',
directory_input='/data/testbed/lasso/sims/',
directory_output = 'data_curtains/',
data_dim_flag=1,
base_smoothing_flag=2,
plot_curtains_flag = 0,
base_percentile = 25,
special_name='',
scale_flag=2,
chord_times = 0,
anomaly_flag = 0,
N_it_max=1e9,
N_it_min=0,
size_bin_flag=0,
N_bins=12,
bin_size = 250,
curtain_extra = 1.0,
chord_max = 1e9,
boundary_scaling_flag = 0
):
# reg_var = variable that will be regularized
# plot_curtains_flag: 0 nothing, 1 plots pre and post regularization plots of reg_var
# data_dim_flag: 1 = column, 3 = 3D snapshot
# time_slice_curtain: 0 only puts out the total sums, 1: adds a seperate output for each time slice, is needed for scale_flag
    # scale_flag: 0 nothing; 1 scales the output by u/sqrt(u^2+v^2) and flips the vector if u>0,
    #               with the ref_lvl used determined from the mean cloud base height;
    #             2 similar to 1 but now using a profile.
    #             Is set to 0 if data_dim_flag==1.
#
    # base_smoothing_flag: 0 use mix of percentile and cloud base as done by Neil, 1: smooth out base after setting it with running average 2: just use percentile defined by base_percentile
# base_percentile: percentile used to find chordlength bottom
    # chord_times: 0 use Neil's values, 1 use values that fit model output exactly with no gap possible
    # anomaly_flag: 0 use reg_var as it is. 1 use reg_var - profile. Works easiest for 3d output, 1d_flag needs to use the closest mean profile
# directory_input = '/data/testbed/lasso/sims/' #+date
# N_it_max = maximum number of iterables, 3D timesteps or column files. Used for testing things quickly
    # size_bin_flag bins the beards by their chord_length. Currently using 8 bins of 250 meters length to get started. The lowest bin should be empty, because we only calculate curtains when at least curtain_min is used
# curtain_extra: Regularized chord length before and after in the curtain, default is 1
# chord_max: Maximum number of chords. If data_dim_flag=3 it will jump to the y direction when chord_max/2 is reached
# boundary_scaling_flag: 0 nothing, 1 uses the surface fluxes and cloud base height to calculate either w/w*, thl'/thl*, or qt'/qt*
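    # Minimal usage sketch (the arguments shown are just the defaults from above):
    #   proc_beard_regularize(reg_var='w', date_str='20160611', data_dim_flag=1,
    #                         directory_input='/data/testbed/lasso/sims/',
    #                         directory_output='data_curtains/')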
time_begin = ttiimmee.time()
dz = 25.0 #39.0625 #Is recalculated from the profile file later on
dx = 25.0
date = date_str
#1D clustering parameters in seconds, taken to agree with Lareau
if chord_times == 0:
t_gap = 20
t_min = 30
t_max = 120000
cell_min = 3 #Minimal number of cells needed per chord
curtain_min = 10 #Minimal number of cells needed to convert into a curtain
# #1D clustering parameters,
#set super strict
if chord_times == 1:
t_gap = 0.#No gaps allowed!
t_min = 0
t_max = 1e9
cell_min = 10 #Minimal number of cells needed per chord
curtain_min = 10 #Minimal number of cells needed per curtain
#value used to determine existence of cloud
ql_min = 1e-5
    z_min = 10 #Index of minimum z lvl of the cbl
    #z_min = 0 #Index of minimum z lvl of the cbl
#Flag clean up
if data_dim_flag==1:
scale_flag=0
#Creating dictionary to save all properties
settings_dict = {
'reg_var': reg_var,
'date_str':date_str,
'directory_input':directory_input,
'data_dim_flag':data_dim_flag,
'base_smoothing_flag':base_smoothing_flag,
'plot_curtains_flag' :plot_curtains_flag,
'base_percentile':base_percentile,
'special_name':special_name,
'scale_flag':scale_flag,
'chord_times':chord_times,
'anomaly_flag':anomaly_flag,
'N_it_max':N_it_max,
'N_it_min':N_it_min,
'size_bin_flag':size_bin_flag,
'bin_size':bin_size,
'N_bins':N_bins,
'curtain_extra':curtain_extra
}
#moved to an inner function to avoid issues with global and local variables
def func_curtain_reg(input_2d_field):
#function regularizes to cloud base
        #2019-03-20: added smoother to hopefully avoid impact of harsh jumps
#2019-03-28: Added simplified version for base_smoothing_flag == 2 which gets rid of 1D pre interpolation
#I originally used interp2d, tried griddata but it was a lot slower
#Calculating the regularized t axis but for original resolution
#It is expected to go a bit beyond -1.5 and 1.5, total width defined by curtain_extra
        #takes the original time vector, subtracts the mean time, then scales it by 1/(time_end_chord-time_beg_chord)
t_reg_orig = t_1d[idx_beg_curtain:idx_end_curtain]-(time_beg_chord+time_end_chord)/2.
t_reg_orig = t_reg_orig/(time_end_chord-time_beg_chord)
#Now we calculate the new regularized grid with the correct vertical but low/original horizontal/time resolution
#mesh_t_low_z_high_x,mesh_t_low_z_high_z = np.meshgrid(t_reg_orig,z_reg_mid) #seems not to be needed
var_t_low_z_high = np.zeros([curtain_cells,n_z_reg])
#introducing z_idx_base vector
#Assigning reference cloud base where no cloud present
z_idx_base=cl_base*1.0+0.0
z_idx_base[:] = z_idx_base_default
for i in range(idx_beg_chord,idx_end_chord):
if i>idx_beg_chord-1 and i<idx_end_chord and cl_base[i]<cbl_1d[i]:
z_idx_base[i] = cl_base[i]
#Here the smoother comes into play:
#We started with a simple 5 cell running mean,
#But now we are making it a function of the chordlength, using a 0.1 running mean
if base_smoothing_flag ==1:
z_idx_base_smooth = z_idx_base*1.0
N = int(np.floor(idx_end_chord-idx_beg_chord)*0.1)
for i in range(idx_beg_chord-N,idx_end_chord+N):
z_idx_base_smooth[i] = sum(z_idx_base[i-N:i+N])/(2*N)
z_idx_base[:] = z_idx_base_smooth[:]
if base_smoothing_flag==2:
#just put the percentile back
z_idx_base[:] = z_idx_base_default
#default version for variable base height
if base_smoothing_flag<2:
#Now for each of the columns of the original curtain a vertical interpolation is done
for i in range(idx_beg_curtain,idx_end_curtain):
#assigining column value
var_orig_col = input_2d_field[:,i]
#Regularizing the z axes so that cloud base is at 1
d_z_tmp = 1.0/z_idx_base[i]
nz = var_orig_col.shape[0]
z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
                #Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
var_orig_col = np.hstack([var_orig_col[0],var_orig_col])
                #1D vertical interpolation to get the right columns and assign them one by one to var_t_low_z_high
#f = interp1d(z_reg_orig, var_orig_col, kind='next')
f = interp1d(z_reg_orig, var_orig_col, kind='nearest')
try:
var_reg_inter = f(z_reg_mid)
except:
print(z_idx_base[i])
print(z_reg_orig)
print(z_reg_mid)
var_t_low_z_high[i-idx_beg_curtain,:] = var_reg_inter
        #Now that we have var_t_low_z_high we interpolate in 2D onto the full regularized grid
#print(t_reg_orig.shape,z_reg_mid.shape)
f = interp2d(t_reg_orig, z_reg_mid, var_t_low_z_high.transpose(), kind='linear')
var_curtain = f(t_reg_mid,z_reg_mid)
#constant base height version
if base_smoothing_flag==2:
#Regularizing the z axes so that cloud base is at 1, since z_idx_base is the same everywhere I just use idx_beg_curtain as one.
i=idx_beg_curtain
d_z_tmp = 1.0/z_idx_base[i]
var_orig_2d = input_2d_field[:,idx_beg_curtain:idx_end_curtain]
nz = var_orig_2d.shape[0]
z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
#Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
var_orig_2d = np.vstack([var_orig_2d[0,:],var_orig_2d])
f = interp2d(t_reg_orig, z_reg_orig,var_orig_2d, kind='linear')
var_curtain = f(t_reg_mid,z_reg_mid)
return var_curtain
#Creating regularized grid.
d_reg = 0.005
n_z_reg = int(1.5/d_reg)
n_t_reg = int((1+2*curtain_extra)/d_reg)
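    #Regularized axes: time runs -0.5..0.5 across the chord (extended by curtain_extra
    #on each side) and height runs 0..1.5 in units of cloud-base height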
t_reg_bound = np.linspace(-0.5-curtain_extra,0.5+curtain_extra ,n_t_reg+1)
t_reg_mid = np.linspace(-0.5-curtain_extra+d_reg/2,0.5+curtain_extra-d_reg/2 ,n_t_reg)
z_reg_bound = np.linspace(0,1.5 ,n_z_reg+1)
z_reg_mid = np.linspace(0+d_reg/2,1.5-d_reg/2 ,n_z_reg)
mesh_curtain_t,mesh_curtain_z = np.meshgrid(t_reg_mid,z_reg_mid)
var_curtain = np.zeros([n_t_reg,n_z_reg])
var_curtain_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_curtain_up = 0
n_curtain_dw = 0
if size_bin_flag==1:
        #N_bins = 12 #now taken from the function argument instead of being hard-coded here
n_curtain_bin = np.zeros([N_bins])
n_curtain_bin_up = np.zeros([N_bins])
n_curtain_bin_dw = np.zeros([N_bins])
var_curtain_bin_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
var_curtain_bin_up_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
var_curtain_bin_dw_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
mid_bin_size = np.linspace(125,-125+N_bins*250,N_bins)
print('mid_bin_size',mid_bin_size)
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
#uses glob to get all files which contain column.
column_files = glob.glob(directory_input+date+'/*column*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
file_w = Dataset(filename_w,read='r')
file_ql = Dataset(filename_l,read='r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
#getting variable to be regularized
filename_var = directory_input+date+'/'+reg_var+'.nc'
file_var = Dataset(filename_var,read='r')
filename_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0]
#if date=='bomex':
# filename_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filename_prof,read='r')
extra_string = ''
n_chords = 0
    #This is now a bit trickier than for the 3D version. Will have to calculate a vector for the lower time resolution of the profile,
    #then later apply the nearest value to the full 1d time vec
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
nz_prof = w2.shape[1]
var_prof = file_prof[reg_var][:,:] #needed for anomaly processing
#Just grabbing this to calculate dz
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
print('dz: ',dz)
#for boundary scaling
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
        #Minimum of the LCL and the variance-derived height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
            print('warning, cbl height higher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
            cbl_1d_prof[tt] = math.floor(nz_prof*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#Setting curtains for var
var_curtain_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_chord = 0
n_curtain_up = 0
n_curtain_dw = 0
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
print('n_chords: ',n_chords)
print('n_curtain: ',n_curtain)
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
            file_col = Dataset(filename_column[it],mode='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
u_2d = file_col.variables['u'][:]
u_2d = u_2d.transpose()
v_2d = file_col.variables['v'][:]
v_2d = v_2d.transpose()
print('t_1d',t_1d)
            #Load the var file, even if it means that we double-load w_2d or ql_2d
var_2d = file_col.variables[reg_var][:]
var_2d = var_2d.transpose()
#The needed cbl height
cbl_1d = t_1d*0
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d= t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
            #to get anomalies we subtract the closest mean profile
if anomaly_flag==1:
for tt in range(len(time_prof)):
tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = var_prof[tt,:]
#because the vectors don't perfectly align
var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
var_3d = grab_3d_field(file_var ,it,reg_var)
                #Here we reshape the 3D fields into 2d slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
var_2d = np.array(var_3d.reshape((nz,nx*ny)))
                #Now we do the same thing with the transposed field; this used to be an either/or, now we simply append it
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
var_3d = np.transpose(var_3d, (0, 2, 1))
#globals().update(locals())
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))])
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del var_3d
gc.collect()
#Switching to anomalies if anomaly flag is used
if anomaly_flag==1:
#because the vectors don't perfectly align
var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose()
                #to get the fake time vector we load the wind from the profile data; the grid spacing divided by the wind speed gives us a fake time resolution
                #we use the calculated cbl+300 meter or LCL as reference height
ref_lvl = cbl_1d_prof[it]
u_ref = file_prof['u'][it,ref_lvl]
v_ref = file_prof['v'][it,ref_lvl]
V_ref = np.sqrt(u_ref**2+v_ref**2)
time_resolution = dx/V_ref
print('time iterative, V_ref, time_resolution',it, V_ref, time_resolution )
print('ref_lvl used to determine reference winds',ref_lvl )
#fake t vector,
t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it
else:
#If no clouds are present we pass a very short empty fields over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
var_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
            #The needed cbl height, which is constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
        ### Detecting whether the lowest cloud cell is within 300 m of the CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
        #Used to have a different method using NaNs that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
cl_base[t]=np.argmax(ql_2d[:,t]>ql_min)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
        #Now find cloud bases lower than the max (CBL) height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
t_cbl_cl=t_1d[cbl_cl_idx]
        #Scaling between x and y is calculated here if required. Skipped if there are fewer than four timesteps, which is what is assigned when no clouds are present
if scale_flag > 0 and t_1d.shape[0]>3:
#calculate the profiles of u and v and their scaling
u_ref_prof = file_prof['u'][it,:]
v_ref_prof = file_prof['v'][it,:]
V_ref_prof = np.sqrt(u_ref_prof**2+v_ref_prof**2)
scaling_factor_x_prof = u_ref_prof/V_ref_prof
scaling_factor_y_prof = v_ref_prof/V_ref_prof
#Using the mean cloud base height as the reference lvl
ref_idx = np.mean(cl_base[cbl_cl_idx])
if scale_flag == 1:
                #the scaling factors are taken at the reference level computed above
scaling_factor_x = scaling_factor_x_prof[int(ref_idx)]
scaling_factor_y = scaling_factor_y_prof[int(ref_idx)]
print('Scaling flag 1: scaling factor_x: ',scaling_factor_x,' scaling factor_y: ',scaling_factor_y, ' int(ref_idx): ',int(ref_idx))
if scale_flag == 2:
                #Regularizing the scaling profiles and interpolating them onto the regularized z axis
d_z_tmp = 1.0/ref_idx
nz = scaling_factor_x_prof.shape[0]
z_reg_orig_top = d_z_tmp*nz-d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
                #Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
scaling_factor_x_prof_ext = np.hstack([scaling_factor_x_prof[0],scaling_factor_x_prof])
scaling_factor_y_prof_ext = np.hstack([scaling_factor_y_prof[0],scaling_factor_y_prof])
                #1D vertical interpolation to get the scaling factors on the regularized z axis
f_x = interp1d(z_reg_orig, scaling_factor_x_prof_ext, kind='nearest')
f_y = interp1d(z_reg_orig, scaling_factor_y_prof_ext, kind='nearest')
scaling_factor_x_inter = f_x(z_reg_mid)
scaling_factor_y_inter = f_y(z_reg_mid)
print('Scaling flag 2:, mean scaling_factor_x_inter: ',np.mean(scaling_factor_x_inter),
' mean scaling_factor_y_inter: ',np.mean(scaling_factor_y_inter))
    ### Clustering 1D
    #Now we simply go through all cloudy timesteps
    #As long as the difference to the next cloudy timestep is lower than t_gap it counts as the same cloud
    #As an additional constraint, if the cloudy cells are right next to each other they are always counted as consecutive, no matter the time distance between them.
    #If the difference is larger than t_gap the cloud is over, and a chordlength is created, which is a list of all timesteps that belong to that chordlength
    #However if the duration of the chordlength is lower than t_min or higher than t_max seconds it isn't kept
    #I added an additional constraint that each chord must include at least cell_min cells, because it is possible to get
    #small chord lengths with more than t_min duration which are mostly gaps.
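    #Illustrative example (hypothetical numbers): with t_gap=20 s, cloudy cell
    #times [0, 5, 12, 60, 65] split into two chords, [0, 5, 12] and [60, 65],
    #because 60-12 > t_gap; each chord is then kept only if it also passes the
    #t_min/t_max and cell_min checks below.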
t_cloudy_idx = 0
#n_chords = 0
chord_idx_list = []
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
while t_cloudy_idx < len(cbl_cl_idx)-1 and n_chords<chord_max:
#print('t_chord_begin',t_chord_begin)
t_chord_begin = t_cloudy_idx
#now connecting all cloudy indexes
while t_cloudy_idx < len(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap):
t_cloudy_idx += 1
t_chord_end = t_cloudy_idx
#print('t_chord_end',t_chord_end)
            #Checking if it fulfils the chord criteria regarding time
            #we also added a minimum height of 100 m to screen out fog/dew stuff at the surface
if t_chord_end-t_chord_begin>cell_min:
chord_z_min = np.min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])
chord_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
else:
chord_z_min = 0
chord_duration = 0
if chord_duration>t_min and chord_duration<t_max and chord_z_min > 4:
if t_chord_end-t_chord_begin>cell_min-1:
n_chords += 1
#chord_idx_list.append(list(cbl_cl_idx[t_chord_begin:t_cloudy_idx]))
#Here we start the interpolation stuff
#Getting the chord beginning and end
idx_beg_chord = cbl_cl_idx[t_chord_begin]
idx_end_chord = cbl_cl_idx[t_chord_end]
time_beg_chord = t_1d[idx_beg_chord]
time_end_chord = t_1d[idx_end_chord]
                #Calculate the beginning and end of the curtain; we add a bit to each side to make interpolation easy
idx_beg_curtain = (np.abs(t_1d - (time_beg_chord-curtain_extra*(time_end_chord-time_beg_chord)))).argmin()-1
idx_end_curtain = (np.abs(t_1d - (time_end_chord+curtain_extra*(time_end_chord-time_beg_chord)))).argmin()+2
idx_end_curtain = min(idx_end_curtain,nt-1)
time_beg_curtain = t_1d[idx_beg_curtain]
time_end_curtain = t_1d[idx_end_curtain]
chord_cells = t_chord_end-t_chord_begin
curtain_cells = idx_end_curtain-idx_beg_curtain
                #If the curtain has more than curtain_min cells and the curtain tail does not extend beyond the end of the 2d field nor the beginning before it.
                #I added a 2 cell buffer at the beginning and end, because the interpolation uses a bit of overlap.
if idx_end_curtain<nt-2 and idx_beg_curtain>2 and len(cbl_cl_idx[t_chord_begin:t_chord_end])>curtain_min-1:
n_curtain += 1
                    #First thing to do is calculate the chord base using the 25th percentile in agreement with Neil
z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]],base_percentile))
#Regularized curtains, I am too lazy to pass on all my variables to func_curtain_reg so I instead made it a nested function
var_curtain_tmp = (func_curtain_reg(var_2d)).transpose()
if boundary_scaling_flag == 1:
#Now adding the boundary scaling using w*
surf_flux = np.mean(bflux_s_1d[idx_beg_chord:idx_end_chord])
base_height = z_idx_base_default*dz
w_star=(base_height*surf_flux)**(1/3)
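                            #Worked example (illustrative): base_height=1000 m and
                            #surf_flux=0.01 m2/s3 give w_star=(10)**(1/3)~2.15 m/s.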
if reg_var=='w':
boundary_scaling = w_star
if reg_var=='qt':
surf_flux = np.mean(qtflux_s_1d[idx_beg_chord:idx_end_chord])
boundary_scaling = surf_flux/w_star
if reg_var=='thl':
thl_flux = np.mean(thlflux_s_1d[idx_beg_chord:idx_end_chord])
                                boundary_scaling = thl_flux/w_star
var_curtain_tmp = var_curtain_tmp/boundary_scaling
#Finally add it to the mean one and track one more curtain
#detecting if chord base has a positive or negative w, then adds to the sum of up or downdraft chords
w_tmp = w_2d[cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]]-1,cbl_cl_idx[t_chord_begin:t_chord_end]]
#print(w_tmp)
                        #Scaling is now added here,
                        #Things are applied twice so that dividing by n it comes out fine
                        #We assume here that n_x and n_y are roughly the same
                        #Could be made cleaner later on
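                        #Illustrative reading of the factor of 2 (an assumption based on
                        #the comments above): x slices get weight 2*scaling_factor_x and
                        #y slices 2*scaling_factor_y; since scaling_factor_x**2 +
                        #scaling_factor_y**2 = 1, dividing the summed curtains by the full
                        #n_curtain (x and y slices combined) roughly compensates.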
if scale_flag>0 and data_dim_flag==3:
if scale_flag==1:
#find out if we need scaling_factor_x or y by seeing if we are in the first or second half
if idx_end_curtain<nt/2:
scaling_factor = 2*scaling_factor_x
else:
scaling_factor = 2*scaling_factor_y
if scaling_factor>0:
var_curtain_tmp = var_curtain_tmp[::-1,:]
var_curtain_tmp = abs(scaling_factor) * var_curtain_tmp
if scale_flag==2:
if idx_end_curtain<nt/2:
scaling_factor_prof = 2*scaling_factor_x_inter
else:
scaling_factor_prof = 2*scaling_factor_y_inter
for n_prof in range(scaling_factor_prof.shape[0]):
if scaling_factor_prof[n_prof]>0:
var_curtain_tmp[:,n_prof] = var_curtain_tmp[::-1,n_prof]
var_curtain_tmp [:,n_prof]= abs(scaling_factor_prof[n_prof])*var_curtain_tmp[:,n_prof]
#Now adding the var_curtain_tmp to the sums
var_curtain_sum = var_curtain_sum+var_curtain_tmp
if np.mean(w_tmp)>0.:
n_curtain_up += 1
var_curtain_up_sum += var_curtain_tmp
elif np.mean(w_tmp)<0.:
n_curtain_dw += 1
var_curtain_dw_sum += var_curtain_tmp
else:
                            print('warning: mean chord-base w is exactly zero: ',np.mean(w_tmp),w_tmp)
#globals().update(locals())
###############################################################################################################################################
################## SIZE BINNING ##############################################################################################################
###############################################################################################################################################
if size_bin_flag:
#getting V_ref if data_dim_flag==1. Is calculated directly from the cloud base speeds
if data_dim_flag==1:
ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end])
u_ref=np.mean(u_2d[cl_base[ch_idx_l],ch_idx_l])
v_ref=np.mean(v_2d[cl_base[ch_idx_l],ch_idx_l])
V_ref=np.sqrt(u_ref**2+v_ref**2)
ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
chord_length = ch_duration*V_ref
#if scale_flag==0:
# scaling_factor=1.
#find index of bin close to mid size bin
bin_idx = np.where(np.abs(chord_length-mid_bin_size)<125)[0]
if bin_idx.size>0:
#print('bin_idx,chord_length',bin_idx,chord_length)
n_curtain_bin[bin_idx] += 1
var_curtain_bin_sum[bin_idx,:,:] = var_curtain_bin_sum[bin_idx,:,:] + var_curtain_tmp
if np.mean(w_tmp)>0.:
n_curtain_bin_up[bin_idx] += 1
var_curtain_bin_up_sum[bin_idx,:,:] += var_curtain_tmp
elif np.mean(w_tmp)<0.:
n_curtain_bin_dw[bin_idx] += 1
var_curtain_bin_dw_sum[bin_idx,:,:] += var_curtain_tmp
else:
                                    print('warning: mean chord-base w is exactly zero: ',np.mean(w_tmp),w_tmp)
##############################################################################################################################
#PLOTS
##############################################################################################################################
#If the plot flag is set the pre regularization curtains are plotted.
if plot_curtains_flag ==1:
print('plotting not implemented yet')
##############################################################################################################################
#switching to y direction if half of max chords reached
##############################################################################################################################
if n_chords == int(chord_max/2):
t_cloudy_idx = int(len(cbl_cl_idx)/2)
t_cloudy_idx += 1
time3 = ttiimmee.time()
print('curtain processing:',(time3-time2)/60.0,'minutes')
print(':')
print(':')
print(':')
time_end = ttiimmee.time()
print('total run time of proc_beard_regularize in minutes: ',(time_end-time_begin)/60.)
print(':')
print(':')
print(':')
    #Saving is now done with xarray
xr_dataset = xr.Dataset(
data_vars = {reg_var :(('regularized height', 'regularized time'), var_curtain_sum.transpose()/n_curtain),
reg_var+'_up':(('regularized height', 'regularized time'), var_curtain_up_sum.transpose()/n_curtain_up),
reg_var+'_dw':(('regularized height', 'regularized time'), var_curtain_dw_sum.transpose()/n_curtain_dw)},
coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid})
xr_dataset[reg_var].attrs['n']=n_curtain
xr_dataset[reg_var+'_up'].attrs['n']=n_curtain_up
xr_dataset[reg_var+'_dw'].attrs['n']=n_curtain_dw
xr_dataset.attrs = settings_dict
#Making save string
save_string_base = '_beard_'+date+'_d'+str(data_dim_flag)+'_cb'+str(base_smoothing_flag)+'_an'+str(anomaly_flag)+'_ct'+str(chord_times)+'_ce'+str(int(curtain_extra))
if data_dim_flag==3:
save_string_base = save_string_base+'_sf'+str(scale_flag)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
if boundary_scaling_flag==1:
save_string_base = 'star'+save_string_base
save_string_base = save_string_base+'_'+special_name+'_N'+str(n_curtain)
save_string = directory_output+ reg_var+save_string_base +'.nc'
xr_dataset.to_netcdf(save_string)
print('saved beard data to '+save_string)
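    #The saved dataset can later be reloaded with, e.g., xr.open_dataset(save_string)
    #(hypothetical downstream usage).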
if size_bin_flag==1:
xr_dataset = xr.Dataset(
data_vars = {reg_var :(('regularized height', 'regularized time','length'), var_curtain_bin_sum.transpose()/n_curtain_bin),
reg_var+'_up':(('regularized height', 'regularized time','length'), var_curtain_bin_up_sum.transpose()/n_curtain_bin_up),
reg_var+'_dw':(('regularized height', 'regularized time','length'), var_curtain_bin_dw_sum.transpose()/n_curtain_bin_dw)},
coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid, 'length':mid_bin_size})
xr_dataset[reg_var].attrs['n'] =n_curtain_bin
xr_dataset[reg_var+'_up'].attrs['n'] =n_curtain_bin_up
xr_dataset[reg_var+'_dw'].attrs['n'] =n_curtain_bin_dw
xr_dataset.attrs = settings_dict
save_string = directory_output+ reg_var+save_string_base+'_sizebin.nc'
xr_dataset.to_netcdf(save_string)
print('saved size binned beards to '+save_string)
print(':')
print(':')
print(':')
print(':')
print(':')
return
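#Hedged usage sketch (illustrative only; parameter names are inferred from the
#function body and may differ from the actual signature):
#proc_beard_regularize(reg_var='w', date_str='20160611', data_dim_flag=3,
#                      N_it_max=10, scale_flag=2)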
#A simple function which calculates a histogram of a variable below the cloud base and saves it
#I will try to keep it at least somewhat general with a flexible variable
def proc_pdf(reg_var = 'w',
date_str='20160611',
directory_input ='/data/testbed/lasso/sims/',
directory_output ='data_pdfs/',
data_dim_flag=3,
special_name='',
N_it_max=1e9,
N_it_min=0,
anomaly_flag =0,
N_bins=400,
base_percentile = 25,
boundary_scaling_flag = 1,
range_var = [-10,10] ):
    #We are starting out with histograms of w from -10 to 10, i.e. a 0.05 spacing for the default 400 bins
var_hist_sum=np.zeros(N_bins)
date = date_str
#value used to determine existence of cloud
ql_min = 1e-5
    z_min = 10 #Index of the minimum z level of the cbl
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
        #Use glob to get all files whose names contain 'column'.
column_files = glob.glob(directory_input+date+'/*.column.*.*.*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
        file_w = Dataset(filename_w,mode='r')
        file_ql = Dataset(filename_l,mode='r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
#getting variable to be regularized
filename_var = directory_input+date+'/'+reg_var+'.nc'
        file_var = Dataset(filename_var,mode='r')
filename_prof=glob.glob(directory_input+date+'/testbed?default?0*.nc')[0]
#filename_prof=directory_input+date+'/testbed.default.0000000.nc'
if date=='bomex':
filename_prof=directory_input+date+'/bomex.default.0000000.nc'
    file_prof = Dataset(filename_prof,mode='r')
extra_string = ''
    #This is now a bit trickier than for the 3D version. We have to calculate a vector at the lower time resolution of the profile,
    #then later apply the nearest value to the full 1d time vector.
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
nz_prof = w2.shape[1]
var_prof = file_prof[reg_var][:,:] #needed for anomaly processing
#Just grabbing this to calculate dz
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
print('dz: ',dz)
#for boundary scaling
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
            #Minimum of the LCL and the w-variance based height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
            print('warning, cbl height higher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
            cbl_1d_prof[tt] = math.floor(nz_prof*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
            file_col = Dataset(filename_column[it],mode='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
print('t_1d',t_1d)
            #Load the var file, even if it means that we double-load w_2d or ql_2d
var_2d = file_col.variables[reg_var][:]
var_2d = var_2d.transpose()
#The needed cbl height
cbl_1d = t_1d*0
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d= t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
            #to get anomalies we subtract the closest mean profile
if anomaly_flag==1:
for tt in range(len(time_prof)):
tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = var_prof[tt,:]
#because the vectors don't perfectly align
var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
var_3d = grab_3d_field(file_var ,it,reg_var)
                #Here we reshape the 3D fields into 2d slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
var_2d = np.array(var_3d.reshape((nz,nx*ny)))
                #Now we do the same thing with the transposed field; this used to be an either/or, now we simply append it
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
var_3d = np.transpose(var_3d, (0, 2, 1))
#globals().update(locals())
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))])
#This might save a bit of memory
if reg_var == 'w':
var_2d = w_2d
if reg_var == 'ql':
var_2d = ql_2d
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del var_3d
gc.collect()
#fake t vector,
t_1d = np.linspace(0,2*nx*ny,2*nx*ny)
#Switching to anomalies if anomaly flag is used
if anomaly_flag==1:
#because the vectors don't perfectly align
var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose()
                #to get the fake time vector we load the wind from the profile data; the grid spacing divided by the wind speed gives us a fake time resolution
                #we use the calculated cbl+300 meter or LCL as reference height
ref_lvl = cbl_1d_prof[it]
else:
#If no clouds are present we pass a very short empty fields over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
var_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
            #The needed cbl height, which is constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
        ### Detecting whether the lowest cloud cell is within 300 m of the CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
        #Used to have a different method using NaNs that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
cl_base[t]=np.argmax(ql_2d[:,t]>ql_min)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
        #Now find cloud bases lower than the max (CBL) height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
if len(cbl_cl_idx)>0:
#Now calculating the var at cloud base
var_cl_base=var_2d[cl_base[cbl_cl_idx]-1,cbl_cl_idx]
#If boundary scaling is used, the variable is scaled accordingly
#Only called if there are any clouds
if boundary_scaling_flag == 1 and len(cbl_cl_idx)>1:
                #First thing to do is calculate the chord base using the 25th percentile in agreement with Neil
if data_dim_flag==3:
z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx],base_percentile))
                # Can't think of a good way to do this for 1d data, so for the time being it exits with an error.
if data_dim_flag==1:
                    print("sorry, but I haven't implemented star scaling for 1d data")
sys.exit()
#Now adding the boundary scaling using w*
#Is a bit overcooked currently as it only works with 3D data and thus all surface fluxes are the same everywhere.
surf_flux = np.mean(bflux_s_1d)
base_height = z_idx_base_default*dz
w_star=(base_height*surf_flux)**(1/3)
if reg_var=='w':
boundary_scaling = w_star
if reg_var=='qt':
surf_flux = np.mean(qtflux_s_1d)
boundary_scaling = surf_flux/w_star
if reg_var=='thl':
thl_flux = np.mean(thlflux_s_1d)
                    boundary_scaling = thl_flux/w_star
var_cl_base = var_cl_base/boundary_scaling
#Calculating the histogram, and adding it to the total histogram
var_hist,bin_edges = np.histogram(var_cl_base,range=range_var,bins=N_bins)
var_hist_sum = var_hist_sum+var_hist
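            #Note: accumulating per-timestep counts this way is equivalent to
            #histogramming the concatenated cloud-base samples, since every call
            #uses the same fixed range_var and N_bins.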
else:
print('no cloudy columns apparently')
var_pdf = var_hist_sum
save_string_base = '_pdf_'+date+'_d'+str(data_dim_flag)+'_an'+str(anomaly_flag)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
if boundary_scaling_flag==1:
save_string_base = 'star'+save_string_base
save_string = directory_output+ reg_var+save_string_base
save_string = save_string+'.npz'
np.savez(save_string,var_pdf=var_pdf,range_var=range_var)
print('saved pdf with ', sum(var_pdf), 'points to '+save_string)
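    #The histogram can later be reloaded with, e.g. (hypothetical usage):
    #  data = np.load(save_string); var_pdf = data['var_pdf']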
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
return
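#Hedged usage sketch (illustrative only; uses the defaults shown in the
#signature above):
#proc_pdf(reg_var='w', date_str='20160611', data_dim_flag=3,
#         N_bins=400, range_var=[-10,10])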
jfear/larval_gonad | expression-atlas-wf/scripts/dmel_tau_housekeeping.py | 624a71741864b74e0372f89bdcca578e5cca3722 | """D. mel housekeeping genes based on tau.
Uses the intersection of w1118 and orgR to create a list of
D. mel housekeeping genes.
"""
import os
from functools import partial
import pandas as pd
from larval_gonad.io import pickle_load, pickle_dump
def main():
# Load mapping of YOgn to FBgn
annot = pickle_load(snakemake.input.annot[0])
pickle_dump(intersect_fbgns(snakemake.input.male, annot), snakemake.output.male)
pickle_dump(intersect_fbgns(snakemake.input.female, annot), snakemake.output.female)
def intersect_fbgns(file_names, annot):
return list(set.intersection(*list(map(partial(convert_to_fbgn, annot=annot), file_names))))
def convert_to_fbgn(file_name, annot):
return set(
[
fbgn
for fbgn in map(lambda x: annot.get(x, None), pickle_load(file_name))
if fbgn is not None
]
)
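# Hedged sketch (not part of the original script): intersect_fbgns maps each
# pickle of YOgn IDs to FBgn IDs via the shared 'annot' dict, then intersects
# the resulting sets. The values below are illustrative assumptions:
#   annot = {"YOgn0001": "FBgn0001", "YOgn0002": "FBgn0002"}
#   sets = [{"FBgn0001", "FBgn0002"}, {"FBgn0001"}]
#   set.intersection(*sets)  # -> {"FBgn0001"}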
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
workdir="expression-atlas-wf",
input=dict(
male=[
"../output/expression-atlas-wf/tau_housekeeping/w1118_male.pkl",
"../output/expression-atlas-wf/tau_housekeeping/orgR_male.pkl",
],
female=[
"../output/expression-atlas-wf/tau_housekeeping/w1118_female.pkl",
"../output/expression-atlas-wf/tau_housekeeping/orgR_female.pkl",
],
annot="../output/expression-atlas-wf/YOgn_to_dmel_ortholog/dmel.pkl",
),
)
main()
| [((15, 12, 15, 49), 'larval_gonad.io.pickle_load', 'pickle_load', ({(15, 24, 15, 48): 'snakemake.input.annot[0]'}, {}), '(snakemake.input.annot[0])', False, 'from larval_gonad.io import pickle_load, pickle_dump\n'), ((36, 7, 36, 38), 'os.getenv', 'os.getenv', ({(36, 17, 36, 30): '"""SNAKE_DEBUG"""', (36, 32, 36, 37): '(False)'}, {}), "('SNAKE_DEBUG', False)", False, 'import os\n'), ((29, 58, 29, 80), 'larval_gonad.io.pickle_load', 'pickle_load', ({(29, 70, 29, 79): 'file_name'}, {}), '(file_name)', False, 'from larval_gonad.io import pickle_load, pickle_dump\n'), ((22, 43, 22, 80), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n')] |
TK-IBM-Call-for-Code-Challange-2021/call-for-code-challenge-2021 | api-server/server/core/key.py | 7a3d78d4067303d61c4a25d45c0671ae7e984222 | """
Api Key validation
"""
from typing import Optional
from fastapi.security.api_key import APIKeyHeader
from fastapi import HTTPException, Security, Depends
from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN
from server.core.security import verify_key
from server.db.mongodb import AsyncIOMotorClient, get_database
from server.models.user import User
from server.db.crud.user import get_user_by_email
from pydantic import EmailStr
api_key_scheme = APIKeyHeader(name="X-API-KEY", auto_error=False)
email_scheme = APIKeyHeader(name="X-EMAIL-ID", auto_error=False)
async def validate_request(
api_key: Optional[str] = Security(api_key_scheme),
email_id: Optional[EmailStr] = Security(email_scheme),
db: AsyncIOMotorClient = Depends(get_database)
) -> Optional[User]:
"""Validate a request with given email and api key
to any endpoint resource
"""
if api_key is None:
raise HTTPException(
status_code=HTTP_400_BAD_REQUEST, detail="X-API-KEY is missing", headers={}
)
if email_id is None:
raise HTTPException(
status_code=HTTP_400_BAD_REQUEST, detail="X-EMAIL-ID is missing", headers={}
)
user = await get_user_by_email(db, email_id)
# verify email & API key
if user:
api_key = str(user.salt) + str(api_key)
if not verify_key(api_key, user.hashed_api_key):
# api key mismatch
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED, detail="Access not allowed", headers={}
)
if user.disabled:
# disabled user
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="User is disabled", headers={}
)
if not user.is_active:
# user's email is not verified
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED, detail="Email not verified", headers={}
)
# All verified
return User(**user.dict())
else:
# not a valid email provided
raise HTTPException(
status_code=HTTP_400_BAD_REQUEST, detail="Unknown Email", headers={}
)
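# Minimal usage sketch (assumption, not part of this module): wiring the
# validator into a route so every request must carry X-API-KEY and X-EMAIL-ID.
# 'router' and the endpoint path are hypothetical names.
#   from fastapi import APIRouter, Depends
#   router = APIRouter()
#   @router.get("/me")
#   async def me(user: User = Depends(validate_request)):
#       return {"email": user.email}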
| [((17, 17, 17, 65), 'fastapi.security.api_key.APIKeyHeader', 'APIKeyHeader', (), '', False, 'from fastapi.security.api_key import APIKeyHeader\n'), ((18, 15, 18, 64), 'fastapi.security.api_key.APIKeyHeader', 'APIKeyHeader', (), '', False, 'from fastapi.security.api_key import APIKeyHeader\n'), ((22, 33, 22, 57), 'fastapi.Security', 'Security', ({(22, 42, 22, 56): 'api_key_scheme'}, {}), '(api_key_scheme)', False, 'from fastapi import HTTPException, Security, Depends\n'), ((23, 39, 23, 61), 'fastapi.Security', 'Security', ({(23, 48, 23, 60): 'email_scheme'}, {}), '(email_scheme)', False, 'from fastapi import HTTPException, Security, Depends\n'), ((24, 33, 24, 54), 'fastapi.Depends', 'Depends', ({(24, 41, 24, 53): 'get_database'}, {}), '(get_database)', False, 'from fastapi import HTTPException, Security, Depends\n'), ((30, 14, 32, 9), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import HTTPException, Security, Depends\n'), ((34, 14, 36, 9), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import HTTPException, Security, Depends\n'), ((38, 17, 38, 48), 'server.db.crud.user.get_user_by_email', 'get_user_by_email', ({(38, 35, 38, 37): 'db', (38, 39, 38, 47): 'email_id'}, {}), '(db, email_id)', False, 'from server.db.crud.user import get_user_by_email\n'), ((64, 14, 66, 9), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import HTTPException, Security, Depends\n'), ((44, 15, 44, 55), 'server.core.security.verify_key', 'verify_key', ({(44, 26, 44, 33): 'api_key', (44, 35, 44, 54): 'user.hashed_api_key'}, {}), '(api_key, user.hashed_api_key)', False, 'from server.core.security import verify_key\n'), ((46, 18, 48, 13), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import HTTPException, Security, Depends\n'), ((51, 18, 53, 13), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import HTTPException, Security, Depends\n'), ((56, 18, 58, 13), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import HTTPException, Security, Depends\n')] |
Osirium/linuxkit | scripts/kconfig-split.py | b710224cdf9a8425a7129cdcb84fc1af00f926d7 | #!/usr/bin/env python
# This is a slightly modified version of ChromiumOS' splitconfig
# https://chromium.googlesource.com/chromiumos/third_party/kernel/+/stabilize-5899.B-chromeos-3.14/chromeos/scripts/splitconfig
"""See this page for more details:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/kernel-configuration
"""
import os
import re
import sys
allconfigs = {}
# Parse config files
for config in sys.argv[1:]:
allconfigs[config] = set()
for line in open(config):
m = re.match("#*\s*CONFIG_(\w+)[\s=](.*)$", line)
if not m:
continue
option, value = m.groups()
allconfigs[config].add((option, value))
# Split out common config options
common = allconfigs.values()[0].copy()
for config in allconfigs.keys():
common &= allconfigs[config]
for config in allconfigs.keys():
allconfigs[config] -= common
allconfigs["common"] = common
# Generate new splitconfigs
for config in allconfigs.keys():
f = open("split-" + config, "w")
for option, value in sorted(list(allconfigs[config])):
if value == "is not set":
print >>f, "# CONFIG_%s %s" % (option, value)
else:
print >>f, "CONFIG_%s=%s" % (option, value)
f.close()
| [((21, 12, 21, 57), 're.match', 're.match', ({(21, 21, 21, 50): '"""#*\\\\s*CONFIG_(\\\\w+)[\\\\s=](.*)$"""', (21, 52, 21, 56): 'line'}, {}), "('#*\\\\s*CONFIG_(\\\\w+)[\\\\s=](.*)$', line)", False, 'import re\n')] |
Mannan2812/azure-cli-extensions | src/synapse/azext_synapse/vendored_sdks/azure_synapse/models/livy_statement_output.py | e2b34efe23795f6db9c59100534a40f0813c3d95 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LivyStatementOutput(Model):
"""LivyStatementOutput.
:param status:
:type status: str
:param execution_count:
:type execution_count: int
:param data:
:type data: object
:param ename:
:type ename: str
:param evalue:
:type evalue: str
:param traceback:
:type traceback: list[str]
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'execution_count': {'key': 'execution_count', 'type': 'int'},
'data': {'key': 'data', 'type': 'object'},
'ename': {'key': 'ename', 'type': 'str'},
'evalue': {'key': 'evalue', 'type': 'str'},
'traceback': {'key': 'traceback', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(LivyStatementOutput, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.execution_count = kwargs.get('execution_count', None)
self.data = kwargs.get('data', None)
self.ename = kwargs.get('ename', None)
self.evalue = kwargs.get('evalue', None)
self.traceback = kwargs.get('traceback', None)
| [] |
mafshar/sub-puppo | src/main.py | 20fe5bf3ca3d250d846c545085f748e706c4a33e | #!/usr/bin/env python
'''
Notes:
- Weak implies weakly supervised learning (4 classes)
    - Strong implies strongly (fully) supervised learning (10 classes)
    - frame size is set to 22ms (default); that is the "sweet spot" based on dsp literature
- sampling rate is 16kHz (for the MFCC of each track)
- Accuracy increases as the test set gets smaller, which implies that a lot of these machine learning models are heavily data-driven (i.e. feed more data for more performance boosts)
- Currently, optimal benchmark results are achieved with a test set size of 10 percent of the total data
'''
import os
import glob
import sys
import time
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from processing import mfcc_processing, datasets
from deep_models import models
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import normalize
input_path = './data/genres/'
mfcc_path = './data/processed/mfcc/'
have_mfccs = True
def normalize_and_split(data, test_size, verbose=False):
scaler = MinMaxScaler()
features = scaler.fit_transform(data['features'])
labels = data['labels']
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=test_size, random_state=42)
norm_data = {}
norm_data['X_train'] = X_train
norm_data['X_test'] = X_test
norm_data['y_train'] = y_train
norm_data['y_test'] = y_test
if verbose:
print 'Training sample feature size:', X_train.shape
print 'Training sample label size:', y_train.shape
print 'Test sample feature size:', X_test.shape
print 'Test sample label size:', y_test.shape
return norm_data
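# Usage sketch (assumption): 'data' is the dict returned by
# mfcc_processing.featurize_data, carrying 'features' (n_samples x n_features)
# and 'labels' arrays, e.g.
#   norm = normalize_and_split(data, test_size=0.10, verbose=True)
#   norm['X_train']  # min-max scaled training features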
def svm_classifier(data, test_size, weak=False, verbose=False):
norm_data = normalize_and_split(data, test_size, verbose)
X_train = norm_data['X_train']
X_test = norm_data['X_test']
y_train = norm_data['y_train']
y_test = norm_data['y_test']
tic = time.time()
svm_clf = SVC(C=10000, kernel='poly', degree=3, tol=0.0001, max_iter=5000, decision_function_shape='ovr') if weak \
else SVC(C=10000, kernel='poly', degree=6, tol=0.01, max_iter=5000, decision_function_shape='ovr')
svm_clf.fit(X_train, y_train)
print 'TEST ACCURACY:', svm_clf.score(X_test, y_test)
toc = time.time()
if verbose:
print '\ttime taken for SVM classifier to run is', toc-tic
return
def knn_classifier(data, test_size, weak=False, verbose=False):
norm_data = normalize_and_split(data, test_size, verbose)
X_train = norm_data['X_train']
X_test = norm_data['X_test']
y_train = norm_data['y_train']
y_test = norm_data['y_test']
tic = time.time()
knn_clf = KNeighborsClassifier(n_neighbors=3, weights='distance', p=1, n_jobs=-1) if weak \
else KNeighborsClassifier(n_neighbors=8, weights='distance', p=1, n_jobs=-1)
knn_clf.fit(X_train, y_train)
print 'TEST ACCURACY:', knn_clf.score(X_test, y_test)
toc = time.time()
if verbose:
print '\ttime taken for KNN classifier to run is', toc-tic
return
def mfcc_nn_model(num_epochs, test_size, weak=False, verbose=False):
tic = time.time()
tensorize = datasets.ToTensor()
dataset = None
net = None
if weak:
dataset = datasets.MfccDatasetWeak(mfcc_path, tensorize)
net = models.MfccNetWeak()
else:
dataset = datasets.MfccDataset(mfcc_path, tensorize)
net = models.MfccNet()
trainloader, testloader = datasets.train_test_dataset_split(dataset)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.8)
for epoch in range(num_epochs):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
            loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
            if verbose and i % 5 == 0: # print every 5 mini-batches
                print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 5))
running_loss = 0.0
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
inputs, labels = data
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print 'TEST ACCURACY:', 1. * correct / total
toc = time.time()
if verbose:
print '\ttime taken for Mfcc NN to run is', toc-tic
return
if __name__ == '__main__':
mfccs = None
data = None
if not have_mfccs:
have_mfccs = True
print 'calculating mfccs...'
mfccs = mfcc_processing.write_mfccs(input_path, mfcc_path, True)
else :
print 'retrieving mfccs...'
mfccs = mfcc_processing.read_mfccs(mfcc_path, True)
data = mfcc_processing.featurize_data(mfccs, weak=True, verbose=True)
print
weak = False
if weak:
data = mfcc_processing.featurize_data(mfccs, weak=True, verbose=True)
print
svm_classifier(data, test_size=0.10, weak=True, verbose=True)
print
knn_classifier(data, test_size=0.10, weak=True, verbose=True)
print
mfcc_nn_model(num_epochs=10, test_size=0.10, weak=True, verbose=True)
else:
data = mfcc_processing.featurize_data(mfccs, weak=False, verbose=True)
print
svm_classifier(data, test_size=0.10, weak=False, verbose=True)
print
knn_classifier(data, test_size=0.10, weak=False, verbose=True)
print
mfcc_nn_model(num_epochs=10, test_size=0.10, weak=False, verbose=True)
| [] |
nmantani/FileInsight-plugins | plugins/Operations/Crypto/blowfish_encrypt_dialog.py | a6b036672e4c72ed06678729a86293212b7213db | #
# Blowfish encrypt - Encrypt selected region with Blowfish
#
# Copyright (c) 2019, Nobutaka Mantani
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import binascii
import re
import sys
import time
import tkinter
import tkinter.ttk
import tkinter.messagebox
try:
import Cryptodome.Cipher.Blowfish
import Cryptodome.Util.Padding
except ImportError:
exit(-1) # PyCryptodome is not installed
# Encrypt selected region with the chosen mode, key, and IV
def encrypt(data, root, cm, ckt, ek, cit, ei):
blowfish_mode = {"ECB":Cryptodome.Cipher.Blowfish.MODE_ECB,
"CBC":Cryptodome.Cipher.Blowfish.MODE_CBC,
"CFB":Cryptodome.Cipher.Blowfish.MODE_CFB,
"OFB":Cryptodome.Cipher.Blowfish.MODE_OFB,
"CTR":Cryptodome.Cipher.Blowfish.MODE_CTR}
mode = cm.get()
key_type = ckt.get()
key = ek.get()
iv_type = cit.get()
iv = ei.get()
if key_type == "Hex":
if re.match("^([0-9A-Fa-f]{2})+$", key):
key = binascii.a2b_hex(key)
else:
tkinter.messagebox.showerror("Error:", message="Key is not in hex format.")
return
else:
key = key.encode()
if mode in ["CBC", "CFB", "OFB", "CTR"] and iv_type == "Hex":
if re.match("^([0-9A-Fa-f]{2})+$", iv):
iv = binascii.a2b_hex(iv)
else:
tkinter.messagebox.showerror("Error:", message="IV is not in hex format.")
return
else:
iv = iv.encode()
if mode in ["CBC", "CFB", "OFB", "CTR"] and len(iv) != Cryptodome.Cipher.Blowfish.block_size:
tkinter.messagebox.showerror("Error:", message="IV size must be %d bytes." % Cryptodome.Cipher.Blowfish.block_size)
return
key_length = len(key)
if key_length < 4 or key_length > 56:
tkinter.messagebox.showerror("Error:", message="Key size must be in the range from 4 bytes and 56 bytes.")
return
try:
if mode == "CFB":
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], iv, segment_size=Cryptodome.Cipher.Blowfish.block_size * 8)
elif mode in ["CBC", "OFB"]:
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], iv)
elif mode == "CTR": # The first seven bytes of IV are used as nonce and the last byte is used as initial_value (compatible with CyberChef).
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], nonce=iv[0:7], initial_value=iv[7])
else:
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode])
if mode in ["ECB", "CBC"]:
data = Cryptodome.Util.Padding.pad(data, Cryptodome.Cipher.Blowfish.block_size)
d = cipher.encrypt(data)
except Exception as e:
tkinter.messagebox.showerror("Error:", message=e)
root.quit()
        exit(1) # Not encrypted
sys.stdout.buffer.write(d)
root.quit()
    exit(0) # Encrypted successfully
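# Standalone sketch of the CTR convention used in encrypt() above (the 8-byte
# IV is split into a 7-byte nonce plus a 1-byte initial counter, as the
# dialog's note states). The key and IV bytes here are illustrative only.
#   key = b"0123456789abcdef"
#   iv = b"\x00" * 7 + b"\x01"
#   c = Cryptodome.Cipher.Blowfish.new(key, Cryptodome.Cipher.Blowfish.MODE_CTR,
#                                      nonce=iv[0:7], initial_value=iv[7])
#   ct = c.encrypt(b"sixteen byte msg")  # CTR is a stream mode: no padding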
def combo_mode_selected(root, cm, cit, ei, lc):
mode = cm.get()
if mode == "ECB":
cit.configure(state = "disabled")
ei.configure(state = "disabled")
else:
cit.configure(state = "readonly")
ei.configure(state = "normal")
if mode == "CTR":
lc.grid()
else:
lc.grid_remove()
# Receive data
data = sys.stdin.buffer.read()
# Create input dialog
root = tkinter.Tk()
root.title("Blowfish encrypt")
root.protocol("WM_DELETE_WINDOW", (lambda r=root: r.quit()))
label_mode = tkinter.Label(root, text="Mode:")
label_mode.grid(row=0, column=0, padx=5, pady=5, sticky="w")
combo_mode = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_mode["values"] = ("ECB", "CBC", "CFB", "OFB", "CTR")
combo_mode.current(0)
combo_mode.grid(row=0, column=1, padx=5, pady=5, sticky="w")
label_key_type = tkinter.Label(root, text="Key type:")
label_key_type.grid(row=1, column=0, padx=5, pady=5, sticky="w")
combo_key_type = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_key_type["values"] = ("Text", "Hex")
combo_key_type.current(0)
combo_key_type.grid(row=1, column=1, padx=5, pady=5)
label_key = tkinter.Label(root, text="Key:")
label_key.grid(row=1, column=2, padx=5, pady=5, sticky="w")
entry_key = tkinter.Entry(width=32)
entry_key.grid(row=1, column=3, padx=5, pady=5, sticky="w")
entry_key.focus() # Focus to this widget
label_iv_type = tkinter.Label(root, text="IV type:")
label_iv_type.grid(row=2, column=0, padx=5, pady=5, sticky="w")
combo_iv_type = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_iv_type["values"] = ("Text", "Hex")
combo_iv_type.current(0)
combo_iv_type.grid(row=2, column=1, padx=5, pady=5)
label_iv = tkinter.Label(root, text="IV:")
label_iv.grid(row=2, column=2, padx=5, pady=5, sticky="w")
entry_iv = tkinter.Entry(width=32)
entry_iv.grid(row=2, column=3, padx=5, pady=5, sticky="w")
button = tkinter.Button(root, text="OK", command=(lambda data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei)))
button.grid(row=3, column=0, padx=5, pady=5, columnspan=4)
label_ctr = tkinter.Label(root, text="Note:\nThe first seven bytes of IV are used as the nonce and the last one\nbyte is used as the initial value of the counter (compatible with\nCyberChef).", justify="left")
label_ctr.grid(row=4, column=0, padx=5, pady=5, columnspan=4, sticky="w")
label_ctr.grid_remove()
# Set callback functions
combo_mode.bind('<<ComboboxSelected>>', lambda event, root=root, cm=combo_mode, cit=combo_iv_type, ei=entry_iv, lc=label_ctr: combo_mode_selected(root, cm, cit, ei, lc))
combo_mode.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
combo_key_type.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
entry_key.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
combo_iv_type.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
entry_iv.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
button.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
# These are disabled in the initial state (ECB mode)
combo_iv_type.configure(state = "disabled")
entry_iv.configure(state = "disabled")
# Adjust window position
sw = root.winfo_screenwidth()
sh = root.winfo_screenheight()
root.update_idletasks() # Necessary to get width and height of the window
ww = root.winfo_width()
wh = root.winfo_height()
root.geometry('+%d+%d' % ((sw/2) - (ww/2), (sh/2) - (wh/2)))
root.mainloop()
exit(1) # Not encrypted
| [((121, 7, 121, 30), 'sys.stdin.buffer.read', 'sys.stdin.buffer.read', ({}, {}), '()', False, 'import sys\n'), ((124, 7, 124, 19), 'tkinter.Tk', 'tkinter.Tk', ({}, {}), '()', False, 'import tkinter\n'), ((128, 13, 128, 46), 'tkinter.Label', 'tkinter.Label', (), '', False, 'import tkinter\n'), ((131, 13, 131, 66), 'tkinter.ttk.Combobox', 'tkinter.ttk.Combobox', (), '', False, 'import tkinter\n'), ((136, 17, 136, 54), 'tkinter.Label', 'tkinter.Label', (), '', False, 'import tkinter\n'), ((139, 17, 139, 70), 'tkinter.ttk.Combobox', 'tkinter.ttk.Combobox', (), '', False, 'import tkinter\n'), ((144, 12, 144, 44), 'tkinter.Label', 'tkinter.Label', (), '', False, 'import tkinter\n'), ((147, 12, 147, 35), 'tkinter.Entry', 'tkinter.Entry', (), '', False, 'import tkinter\n'), ((151, 16, 151, 52), 'tkinter.Label', 'tkinter.Label', (), '', False, 'import tkinter\n'), ((154, 16, 154, 69), 'tkinter.ttk.Combobox', 'tkinter.ttk.Combobox', (), '', False, 'import tkinter\n'), ((159, 11, 159, 42), 'tkinter.Label', 'tkinter.Label', (), '', False, 'import tkinter\n'), ((162, 11, 162, 34), 'tkinter.Entry', 'tkinter.Entry', (), '', False, 'import tkinter\n'), ((168, 12, 168, 209), 'tkinter.Label', 'tkinter.Label', (), '', False, 'import tkinter\n'), ((102, 4, 102, 30), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', ({(102, 28, 102, 29): 'd'}, {}), '(d)', False, 'import sys\n'), ((57, 11, 57, 47), 're.match', 're.match', ({(57, 20, 57, 41): '"""^([0-9A-Fa-f]{2})+$"""', (57, 43, 57, 46): 'key'}, {}), "('^([0-9A-Fa-f]{2})+$', key)", False, 'import re\n'), ((66, 11, 66, 46), 're.match', 're.match', ({(66, 20, 66, 41): '"""^([0-9A-Fa-f]{2})+$"""', (66, 43, 66, 45): 'iv'}, {}), "('^([0-9A-Fa-f]{2})+$', iv)", False, 'import re\n'), ((75, 8, 75, 123), 'tkinter.messagebox.showerror', 'tkinter.messagebox.showerror', (), '', False, 'import tkinter\n'), ((80, 8, 80, 114), 'tkinter.messagebox.showerror', 'tkinter.messagebox.showerror', (), '', False, 'import tkinter\n'), ((58, 18, 58, 39), 'binascii.a2b_hex', 'binascii.a2b_hex', ({(58, 35, 58, 38): 'key'}, {}), '(key)', False, 'import binascii\n'), ((60, 12, 60, 87), 'tkinter.messagebox.showerror', 'tkinter.messagebox.showerror', (), '', False, 'import tkinter\n'), ((67, 17, 67, 37), 'binascii.a2b_hex', 'binascii.a2b_hex', ({(67, 34, 67, 36): 'iv'}, {}), '(iv)', False, 'import binascii\n'), ((69, 12, 69, 86), 'tkinter.messagebox.showerror', 'tkinter.messagebox.showerror', (), '', False, 'import tkinter\n'), ((98, 8, 98, 57), 'tkinter.messagebox.showerror', 'tkinter.messagebox.showerror', (), '', False, 'import tkinter\n')] |
preo/dnspython | dns/rdtypes/IN/IPSECKEY.py | 465785f85f87508209117264c677080e901e957c | # Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.inet
import dns.name
class IPSECKEY(dns.rdata.Rdata):
"""IPSECKEY record
@ivar precedence: the precedence for this key data
@type precedence: int
@ivar gateway_type: the gateway type
@type gateway_type: int
@ivar algorithm: the algorithm to use
@type algorithm: int
@ivar gateway: the public key
@type gateway: None, IPv4 address, IPV6 address, or domain name
@ivar key: the public key
@type key: string
@see: RFC 4025"""
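    # Illustrative presentation format (assumption based on RFC 4025, not part
    # of this module): "precedence gateway-type algorithm gateway key", roughly
    #   10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==
    # where gateway type 1 marks the gateway field as an IPv4 address.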
__slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key']
def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm,
gateway, key):
super(IPSECKEY, self).__init__(rdclass, rdtype)
if gateway_type == 0:
if gateway != '.' and not gateway is None:
raise SyntaxError('invalid gateway for gateway type 0')
gateway = None
elif gateway_type == 1:
# check that it's OK
junk = dns.inet.inet_pton(dns.inet.AF_INET, gateway)
elif gateway_type == 2:
# check that it's OK
junk = dns.inet.inet_pton(dns.inet.AF_INET6, gateway)
elif gateway_type == 3:
pass
else:
raise SyntaxError('invalid IPSECKEY gateway type: %d' % gateway_type)
self.precedence = precedence
self.gateway_type = gateway_type
self.algorithm = algorithm
self.gateway = gateway
self.key = key
def to_text(self, origin=None, relativize=True, **kw):
if self.gateway_type == 0:
gateway = '.'
elif self.gateway_type == 1:
gateway = self.gateway
elif self.gateway_type == 2:
gateway = self.gateway
elif self.gateway_type == 3:
gateway = str(self.gateway.choose_relativity(origin, relativize))
else:
raise ValueError('invalid gateway type')
return '%d %d %d %s %s' % (self.precedence, self.gateway_type,
self.algorithm, gateway,
dns.rdata._base64ify(self.key))
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
precedence = tok.get_uint8()
gateway_type = tok.get_uint8()
algorithm = tok.get_uint8()
if gateway_type == 3:
gateway = tok.get_name().choose_relativity(origin, relativize)
else:
gateway = tok.get_string()
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value)
b64 = ''.join(chunks)
key = b64.decode('base64_codec')
return cls(rdclass, rdtype, precedence, gateway_type, algorithm,
gateway, key)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
header = struct.pack("!BBB", self.precedence, self.gateway_type,
self.algorithm)
file.write(header)
if self.gateway_type == 0:
pass
elif self.gateway_type == 1:
file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway))
elif self.gateway_type == 2:
file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway))
elif self.gateway_type == 3:
self.gateway.to_wire(file, None, origin)
else:
raise ValueError('invalid gateway type')
file.write(self.key)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
if rdlen < 3:
raise dns.exception.FormError
header = struct.unpack('!BBB', wire[current : current + 3])
gateway_type = header[1]
current += 3
rdlen -= 3
if gateway_type == 0:
gateway = None
elif gateway_type == 1:
gateway = dns.inet.inet_ntop(dns.inet.AF_INET,
wire[current : current + 4])
current += 4
rdlen -= 4
elif gateway_type == 2:
gateway = dns.inet.inet_ntop(dns.inet.AF_INET6,
wire[current : current + 16])
current += 16
rdlen -= 16
elif gateway_type == 3:
(gateway, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
current += cused
rdlen -= cused
else:
raise dns.exception.FormError('invalid IPSECKEY gateway type')
key = wire[current : current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], gateway_type, header[2],
gateway, key)
from_wire = classmethod(from_wire)
| [((102, 17, 103, 44), 'struct.pack', 'struct.pack', ({(102, 29, 102, 35): '"""!BBB"""', (102, 37, 102, 52): 'self.precedence', (102, 54, 102, 71): 'self.gateway_type', (103, 29, 103, 43): 'self.algorithm'}, {}), "('!BBB', self.precedence, self.gateway_type, self.algorithm)", False, 'import struct\n'), ((120, 17, 120, 67), 'struct.unpack', 'struct.unpack', ({(120, 31, 120, 37): '"""!BBB"""', (120, 39, 120, 66): 'wire[current:current + 3]'}, {}), "('!BBB', wire[current:current + 3])", False, 'import struct\n')] |
Projjol/py-multistream-select | multistream_select/__init__.py | 624becaaeefa0a76d6841e27fbf7dea3240d2fe0 | __version = '0.1.0'
__all__ = ['MultiStreamSelect', 'hexify']
__author__ = 'Natnael Getahun ([email protected])'
__name__ = 'multistream'
from .multistream import MultiStreamSelect
from .utils import hexify
| [] |
dagesundholm/DAGE | python/input_reader.py | 0d0ef1d3e74ba751ca4d288db9f1ac7f9a822138 |
"""---------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------"""
# Input file reader
import os
import sys
import xml.etree.ElementTree as ET
import numpy, ast
from .generate_objects import SettingsGenerator
from collections import OrderedDict
class InputProgrammingError(Exception):
pass
class InputXML(object):
tag_type = 'input'
definition_tag = 'input_definition'
def __init__(self, filename = None, \
definition_filename = None,\
input_object = None,\
parent_object = None,\
definition = None, \
directory = None):
if (input_object is not None):
self.root = input_object
elif filename is not None:
if definition_filename is None:
definition_filename = os.path.dirname(os.path.realpath(__file__))+"/input_parameters.xml"
if os.path.exists(filename):
self.tree = ET.parse(filename)
self.root = self.tree.getroot()
else:
print("Path for definition file: '{}' does not exist".format(filename))
else:
self.root = None
self.parent_object = parent_object
if directory is not None:
self.directory = directory
elif filename is not None and os.path.exists(filename):
self.directory = os.path.dirname(filename)
elif self.parent_object is not None:
self.directory = self.parent_object.directory
else:
self.directory = None
if definition is not None:
self.definition = definition
elif definition_filename is not None:
if os.path.exists(definition_filename):
definition = ET.parse(definition_filename)
self.definition = definition.getroot()
else:
sys.exit("Input definition filename does not exist: {}".format(definition_filename))
elif self.parent_object is not None:
definition = self.parent_object.definition.find(self.definition_tag)
if definition is not None:
self.definition = definition
else:
sys.exit("Definition tag '{}' not found from parent definition tree", self.definition_tag)
else:
sys.exit("Definition tag input not given.")
self.retrieve()
def prepare(self):
"""
Prepare the input to have all things required to
call the Fortran interface
"""
self.parse()
self.handle_folders()
self.fill_id_values()
kwargs = OrderedDict()
self.get_interface_argument_values(kwargs)
return kwargs
def form_new_directory_path(self, path_text, original_directory = None):
"""
Creates a new directory path from 'path_text' and 'original_directory' and
        validates that it exists. Returns the new path.
"""
if original_directory is not None:
complete_path = os.path.join(original_directory, path_text)
else:
complete_path = path_text
directory_path = os.path.dirname(complete_path)
# check if the path exists
if not os.path.exists(directory_path):
raise Exception("Error: '{}' tag path '{}' does not exist".format(self.tag_type, complete_path))
return directory_path
def retrieve_path(self, path_text, directory):
"""
        Retrieves the content of the xml file at path 'path_text'
        (relative to 'directory') and returns its root element.
"""
if directory is not None:
complete_path = os.path.join(directory, path_text)
else:
complete_path = path_text
# check if the path exists
if os.path.exists(complete_path):
tree = ET.parse(complete_path)
return tree.getroot()
else:
raise Exception("Error: '{}' tag path '{}' does not exist".format(self.tag_type, complete_path))
def retrieve(self):
"""
        Retrieves content for the tag from external file(s),
        if the tag has an attribute or child named 'path' and/or
'extends_path'.
"""
if self.root is not None:
# check if current tag has an attribute or child with
# name 'path'
path_text = InputXML.read_tag_or_attribute_value(self.root, 'path')
# try to retrieve the content from path_text
if path_text is not None and path_text != "":
try:
self.root = self.retrieve_path(path_text, self.directory)
self.directory = self.form_new_directory_path(path_text, self.directory)
except Exception as e:
sys.exit(str(e))
# check if current tag has an attribute or child with
# name 'extends_path'
path_text = InputXML.read_tag_or_attribute_value(self.root, 'extends_path')
self.extends_roots = []
self.extends_directories = []
directory = self.directory
while path_text is not None:
# try to retrieve the content from path_text
try:
self.extends_roots.append(self.retrieve_path(path_text, directory))
self.extends_directories.append(self.form_new_directory_path(path_text, directory))
except Exception as e:
sys.exit(str(e))
# prepare for the next loop by getting the next extends path and corresponding directory
directory = self.extends_directories[-1]
path_text = InputXML.read_tag_or_attribute_value(self.extends_roots[-1], 'extends_path')
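    # Illustrative input (hypothetical file names, not shipped with the code):
    # a tag can pull its content from another file and chain further, e.g.
    #   <settings extends_path="../defaults/settings.xml"> ... </settings>
    # where the referenced file may itself contain an 'extends_path'; the loop
    # above follows the whole chain into self.extends_roots.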
def fill_id_values(self):
"""
        Finds the id for each parameter that references another tag by name
        and fills it into the corresponding '*_id' parameter.
"""
for parameter_name in self.parameter_values:
if parameter_name.endswith("_id"):
# check if the tag has value that is not 0, in that case
# we are not finding the value
if self.get_parameter_value(parameter_name) == 0:
tagtype = parameter_name[:parameter_name.rfind('_')]
name_tag_found = tagtype+"_name" in self.parameter_values
if name_tag_found:
name = self.parameter_values[tagtype+"_name"]
if name is not None and name != "":
id_value = self.get_tagid_for_name(tagtype, name)
if id_value != -1:
self.parameter_values[parameter_name] = id_value
for child in self.children:
child.fill_id_values()
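    # Example of the name -> id resolution above (hypothetical input): if a
    # sibling <structure><name>water</name></structure> was assigned id 1 by
    # its counter, an action with <structure_name>water</structure_name> gets
    # its 'structure_id' parameter filled with 1.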
def get_tagid_for_name(self, tagtype, name):
if self.parent_object is not None:
for child in self.parent_object.children:
if hasattr(child, 'tag_type') and child.tag_type == tagtype and hasattr(child, 'name') and child.name == name:
return child.id
return -1
def get_parameter_definition(self, parameter_name):
"""
Retrieve the parameter definition for parameter name
'parameter_name'.
"""
for parameter_definition in self.definition.findall('parameter'):
if parameter_definition.attrib['name'] == parameter_name:
return parameter_definition
return None
def get_definition_tag(self, tag_name):
"""
Retrieve the definition tag for a tag with name = tag_name
"""
definition = self.definition.find('{}'.format(tag_name))
return definition
def _parse_children(self, root, directory):
"""
Parse children of root xml-tag 'root' and store them as
children in the 'self'.
Note: this function is a subfunctionality of function 'parse'
and it should not be used independently.
"""
for tag in root:
if tag.tag not in self.parameter_values:
# try to find the correct definition tag by using the "*_input"-format
definition = self.definition.find('{}_input'.format(tag.tag))
# if the input definition was not found, try to find the definition from
# the '<class>'-tags
if definition is None:
definition_found = False
for definition_tag in self.definition.findall('class'):
if definition_tag.attrib['name'] == tag.tag:
definition = definition_tag
definition_found = True
break
if not definition_found:
print("Warning: Found unknown tag with name '{}'. Ignoring.".format(tag.tag))
continue
else:
child = InputXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
else:
if tag.tag == 'settings':
child = SettingsXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'structure':
child = StructureXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'basis_set':
child = BasisSetXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'action':
child = ActionXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'scf_energetics':
child = SCFEnergeticsXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
self.children.append(child)
self.child_definitions.append(tag.tag)
self.add_counters(child)
child.parse()
def parse(self):
"""
Parse paremeters and child xml-tags of the root-xml tags stored
in self.root and self.extends_roots. Stores the found child-xml classes
to 'self.children' and the parameter values to 'self.parameter_values'.
The corresponding definitions are stored to 'self.child_definitions' and
'self.parameter_definitions', respectively.
User must note that this function is recursive as it calls 'parse' for
all found children in '_parse_children' calls.
"""
self.parameter_values = OrderedDict()
self.parameter_definitions = OrderedDict()
self.children = []
self.child_definitions = []
# handle the parameters first
for parameter_definition in self.definition.findall('parameter'):
if SettingsGenerator.is_valid_parameter(parameter_definition):
self.set_parameter_value(parameter_definition, self.read_parameter_value(parameter_definition))
self.parameter_definitions[parameter_definition.attrib['name']] = parameter_definition
if parameter_definition.attrib['name'] == 'name':
self.name = self.parameter_values['name']
else:
print("PARAMETER is not valid", parameter_definition.attrib['name'])
# if the object has extends_root, then parse the children from it
# and store them to 'self'
if hasattr(self, 'extends_roots') and self.extends_roots is not None\
and hasattr(self, 'extends_directories') and self.extends_directories is not None:
for i, extends_root in enumerate(self.extends_roots):
self._parse_children(extends_root, self.extends_directories[i])
# parse the children from the xml-root of this object and store them
# to 'self'
if self.root is not None:
self._parse_children(self.root, self.directory)
# add the tag classes that are not found in the input file, just to
# input the default values.
for definition_tag in self.definition.findall('class'):
if definition_tag.attrib['name'] not in self.child_definitions:
child = InputXML(parent_object = self, definition = definition_tag)
self.children.append(child)
child.parse()
def handle_folders(self):
"""
Creates missing folders and replaces relative paths with
non-relative ones
"""
for parameter_name in self.parameter_values:
if parameter_name in ['output_folder', 'input_folder', 'folder_path']:
if self.parameter_values[parameter_name] is not None:
# convert the non absolute paths to absolute ones
if not os.path.isabs(self.parameter_values[parameter_name]):
# join the directory of the file with the input directory
path = os.path.join(self.directory, self.parameter_values[parameter_name])
# make the path more readable by removing extra slashes and dots
self.parameter_values[parameter_name] = os.path.normpath(path)
# if the output folder does not exist, create it
if parameter_name == 'output_folder' and not os.path.exists(self.parameter_values[parameter_name]):
os.makedirs(self.parameter_values[parameter_name])
for child in self.children:
child.handle_folders()
def get_interface_argument_values(self, argument_values, parameter_definitions = {}, abbreviation = None, counter_present = False):
"""
This function converts the values of the parameters to a form suitable for the
Fortran interface. The converted values are stored to input-output dictionary 'arguments_values'.
"""
if 'abbreviation' in self.definition.attrib:
abbreviation = self.definition.attrib['abbreviation']
for parameter_name in self.parameter_values:
if SettingsGenerator.generate_fortran(self.parameter_definitions[parameter_name]):
if abbreviation is not None:
argument_key = "{}_{}".format(abbreviation, parameter_name)
else:
argument_key = parameter_name
if counter_present:
# Check if the parameter value is None. If the value is None, the
# parameter is not present in the input file, and the default
# value of the parameter is not specified.
if self.parameter_values[parameter_name] is not None:
if argument_key in argument_values and argument_values[argument_key] is not None:
argument_values[argument_key].append(self.parameter_values[parameter_name])
else:
argument_values[argument_key] = [self.parameter_values[parameter_name]]
parameter_definitions[argument_key] = self.parameter_definitions[parameter_name]
else:
if argument_key not in parameter_definitions:
argument_values[argument_key] = None
parameter_definitions[argument_key] = self.parameter_definitions[parameter_name]
else:
if argument_key in argument_values:
print("Warning: Found two (or more) arguments for the same parameter: {}".format(argument_key))
else:
argument_values[argument_key] = self.parameter_values[parameter_name]
parameter_definitions[argument_key] = self.parameter_definitions[parameter_name]
for child in self.children:
if 'global_index_counter' in child.definition.attrib or 'local_index_counter' in child.definition.attrib or 'counters' in child.definition.attrib:
counter_present = True
if SettingsGenerator.generate_fortran(child.definition):
child.get_interface_argument_values(argument_values, parameter_definitions, abbreviation = abbreviation, counter_present = counter_present)
# if we are at the root, convert the values with type list to numpy arrays
if self.parent_object is None:
for argument_key in list(argument_values):
# the string lists need some special attention:
if parameter_definitions[argument_key].attrib['type'].startswith('string') and type(argument_values[argument_key]) == list:
temp = numpy.empty((256, len(argument_values[argument_key])+1), dtype="c")
for j, value in enumerate(argument_values[argument_key]):
temp[:, j] = "{0:{width}}".format(argument_values[argument_key][j], width=256)
argument_values[argument_key] = numpy.array(temp, dtype="c").T
elif type(argument_values[argument_key]) == list:
temp_array = numpy.array(argument_values[argument_key], order='F').T
shape = temp_array.shape
if len(shape) == 3:
new_shape = (shape[0], shape[1], shape[2]+1)
elif len(shape) == 2:
new_shape = (shape[0], shape[1]+1)
else:
new_shape = (shape[0]+1)
new_array = numpy.empty(new_shape, order='F')
if len(shape) == 3:
new_array[:, :, :shape[2]] = temp_array[:, :, :]
elif len(shape) == 2:
new_array[:, :shape[1]] = temp_array[:, :]
else:
new_array[:shape[0]] = temp_array[:]
argument_values[argument_key] = new_array
elif argument_values[argument_key] is None:
del argument_values[argument_key]
def add_counters(self, child):
"""
Add all the counter values for the child object 'child' of 'self' by one
"""
if 'global_index_counter' in child.definition.attrib:
success = self.add_counter_value(child.definition.attrib['global_index_counter'])
if not success:
print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['global_index_counter']))
else:
child.id = self.get_counter_value(child.definition.attrib['global_index_counter'])
if 'local_index_counter' in child.definition.attrib:
success = self.add_counter_value(child.definition.attrib['local_index_counter'])
if not success:
print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['local_index_counter']))
if 'counters' in child.definition.attrib:
success = self.add_counter_value(child.definition.attrib['counters'])
if not success:
print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['counters']))
def add_counter_value(self, counter_name):
"""
Add value of counter parameter with name=='counter_name' by one.
If the counter is not found in the local object, it
        is searched from the parent objects.
"""
if counter_name in self.parameter_values:
if self.parameter_values[counter_name] is None:
self.parameter_values[counter_name] = 0
self.parameter_values[counter_name] += 1
return True
else:
if self.parent_object is not None:
return self.parent_object.add_counter_value(counter_name)
else:
return False
def get_counter_value(self, counter_name):
"""
Get the value of a counter with name 'counter_name'.
If the counter is not found in the local object, it
        is searched from the parent objects.
"""
if counter_name in self.parameter_values:
return self.parameter_values[counter_name]
else:
if self.parent_object is not None:
return self.parent_object.get_counter_value(counter_name)
else:
return -1
def set_parameter_value(self, parameter_definition, value):
"""
Set an arbitrary value 'value' for the parameter with definition
'parameter_definition'.
"""
# convert the value to right data type and check that it is valid
final_value = self.convert_argument_value(value, parameter_definition)
# check that value is within given limits
self.check_value_range(final_value, parameter_definition)
# set the parameter value
self.parameter_values[parameter_definition.attrib['name']] = final_value
@staticmethod
def read_tag_or_attribute_value(root, name):
"""
Reads the value of a tag or attribute with name 'name' in an xml. If
attribute or tag is not found, None is returned.
"""
value = None
if root is not None:
tag = root.find(name)
if tag is not None:
value = tag.text
elif name in root.attrib:
value = root.attrib[name]
return value
def read_parameter_value(self, parameter_definition):
"""
Read the value of the parameter first from the values of the XML-element,
secondarily from the objects we are extending from and thirdly from
the default value of the parameter definition.
"""
value = InputXML.read_tag_or_attribute_value(self.root, parameter_definition.attrib['name'])
# if value is not found at root, then use the value from extends roots
if value is None and hasattr(self, 'extends_roots') and self.extends_roots is not None:
for extends_root in self.extends_roots:
value = InputXML.read_tag_or_attribute_value(extends_root, parameter_definition.attrib['name'])
# if value is found, break the iteration
if value is not None:
break
# fall back to default value/or None if one is not specified
if value is None:
if 'default' in parameter_definition.attrib:
value = parameter_definition.attrib['default']
return value
def get_parameter_value(self, parameter_name):
"""
Get the value of the parameter from the parsed parameters.
If the parameter is not found an InputProgrammingError
is raised.
"""
if hasattr(self, 'parameter_values') and parameter_name in self.parameter_values:
return self.parameter_values[parameter_name]
else:
raise InputProgrammingError("Accessed parameter: '{}' is not in the values ".format(parameter_name)+ \
"of the object. Have you perfomed 'parse' for the object?")
def parameter_values_are_equal(self, other, parameter_name):
"""
Compare the values of parameter with name 'parameter_name' for
two objects of the same type.
"""
# check that the input objects are of same type
if type(self) != type(other):
raise InputProgrammingError("The objects compared with parameter_values_are_equal"+
" are not of same type.")
# get the values for both input objects
self_value = self.get_parameter_value(parameter_name)
other_value = other.get_parameter_value(parameter_name)
if isinstance(self_value, list) or isinstance(self_value, numpy.ndarray):
if len(self_value) != len(other_value):
return False
for i in range(len(self_value)):
if type(self_value[i]) == float or type(self_value[i]) == numpy.float64 or type(self_value[i]) == numpy.float32 or type(self_value[i]) == numpy.float16:
if abs(self_value[i] - other_value[i]) > 1e-10:
return False
elif self_value[i] != other_value[i]:
return False
return True
else:
return self_value == other_value
def all_parameter_values_are_equal(self, other):
"""
Check if all parameter values of 'self' and 'other'
are equal
"""
for parameter_name in self.parameter_values:
if not self.parameter_values_are_equal(other, parameter_name):
return False
return True
def is_of_same_type_as(self, other):
"""
Check if self is of same type as other
"""
return type(self) == type(other) \
and self.definition.attrib['name'] == other.definition.attrib['name']
def children_are_equal(self, other):
"""
Check if children of 'self' and 'other' are equal with definition
and value
"""
for child in self.children:
equal_found = False
# go through all the children and check if there is equal
for other_child in other.children:
if child == other_child:
equal_found = True
# if not, the children cannot be equal
if not equal_found:
return False
return True
def __eq__(self, other):
"""
Check if two InputXML objects are equal with each other
"""
return self.is_of_same_type_as(other)\
and self.all_parameter_values_are_equal(other)\
and self.children_are_equal(other)
def __ne__(self, other):
return not self.__eq__(other)
def read_array_values(self, value_text, argument_type):
is_number = argument_type.startswith("int") or \
argument_type.startswith("float") or \
argument_type.startswith("double")
# try to evaluate the molecular orbitals as dict
try:
dictionary = ast.literal_eval("{"+ value_text +"}")
size = max(dictionary.keys())
# init array of size
if is_number:
result = [0] * size
else:
result = [None] * size
for key in dictionary:
# convert the indexing from the 1-starting to 0-starting
result[key-1] = dictionary[key]
except:
try:
result = ast.literal_eval("["+ value_text +"]")
except:
raise Exception("Bad form of array, should have a list or a dictionary, value is: {}.".format(value_text))
return result
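    # Worked example (illustrative): with a numeric argument_type the text
    #   "1: 1.5, 3: 2.5"  parses as a dict and yields [1.5, 0, 2.5]
    #   "1.5, 2.5"        parses as a list and yields [1.5, 2.5]
    # Dict keys are 1-based indices; unset numeric slots default to 0.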
def convert_argument_value(self, value_text, parameter_definition):
argument_type = parameter_definition.attrib['type']
if SettingsGenerator.has_options(parameter_definition):
value_text = self.get_option_value(value_text, parameter_definition)
if SettingsGenerator.is_array(parameter_definition):
if value_text is None:
value = None
else:
# do the parsing of the input array (could also be a dictionary), which
# has to be changed to a list
array_values = self.read_array_values(value_text, argument_type)
# get the final size of the result array from the parameter definition
size = int(parameter_definition.attrib['shape'])
                # strings cannot be stored into the float array from numpy.zeros,
                # so use an object array for string-typed parameters
                if argument_type.startswith('string'):
                    value = numpy.empty(size, dtype=object)
                else:
                    value = numpy.zeros(size)
try:
for i, arg in enumerate(array_values):
if argument_type.startswith('int'):
value[i] = int(arg)
if argument_type.startswith('float'):
value[i] = float(arg)
if argument_type.startswith('double'):
value[i] = float(arg)
if argument_type.startswith('string'):
if SettingsGenerator.generate_fortran(parameter_definition):
value[i] = str(arg)
else:
value[i] = str(arg)
if argument_type.startswith('bool'):
if arg.lower() == 'false':
value[i] = False
elif arg.lower() == 'true':
value[i] = True
else:
value[i] = bool(arg)
except ValueError:
sys.exit('Error: parameter with type \'{}\' and name \'{}\' has invalid value: \'{}\''.format(argument_type, parameter_definition.attrib['name'], value_text))
else:
try:
if value_text is None:
value = None
elif argument_type.startswith('int'):
value = int(value_text)
elif argument_type.startswith('float'):
value = float(value_text)
elif argument_type.startswith('double'):
value = float(value_text)
elif argument_type.startswith('string'):
if SettingsGenerator.generate_fortran(parameter_definition):
value = str(value_text)
else:
value = str(value_text)
elif argument_type.startswith('bool'):
if value_text.lower() == 'false':
value = False
elif value_text.lower() == 'true':
value = True
else:
                        value = bool(value_text)
except ValueError:
sys.exit('Error: parameter with type \'{}\' and name \'{}\' has invalid value: \'{}\''.format(argument_type, parameter_definition.attrib['name'], value_text))
return value
def check_value_range(self, value, parameter_definition):
if value is not None:
if 'minval' in parameter_definition.attrib:
minval = parameter_definition.attrib['minval']
if value < float(minval):
sys.exit('Error: argument with name {} and value {} is smaller than the smallest allowed value: {}', parameter_definition.attrib['name'], value, float(minval))
if 'maxval' in parameter_definition.attrib:
maxval = parameter_definition.attrib['maxval']
if value > float(maxval):
sys.exit('Error: argument with name {} and value {} is larger than the largest allowed value: {}', parameter_definition.attrib['name'], value, float(maxval))
def get_option_value(self, value_text, parameter_definition):
options = parameter_definition.findall('option')
result = None
if len(options) > 0:
valid_options = ""
for option in options:
if 'value' in option.attrib and value_text == option.attrib['value']:
return value_text
elif 'text_value' in option.attrib and value_text == option.attrib['text_value']:
return option.attrib['value']
else:
valid_options += ("{}: {} ".format(option.attrib['value'], option.attrib['text_value']))
sys.exit('Error: The value "{}" for argument with name "{}" is not within allowed options: {} '.format(value_text, parameter_definition.attrib['name'], valid_options))
def get_root_object(self):
if self.parent_object is None:
return self
else:
return self.parent_object.get_root_object()
class SCFEnergeticsXML(InputXML):
tag_type = 'scf_energetics'
definition_tag = 'scf_energetics_input'
class ActionXML(InputXML):
tag_type = 'action'
definition_tag = 'action_input'
def parse(self):
super(ActionXML, self).parse()
self.handle_output_files()
def handle_output_files(self):
"""
Reads in the output files and creates the corresponding
        objects in the tree.
"""
if 'output_folder' in self.parameter_values:
scf_energetics_filename = \
os.path.join(self.parameter_values['output_folder'], "scf_energetics.xml")
root_object = self.get_root_object()
# if scf energetics file exists, parse it and add as a child of the root
# and set it as the input scf energetics of the action
if os.path.exists(os.path.join(self.directory, scf_energetics_filename)):
scf_energetics_definition = root_object.definition.find('scf_energetics_input')
scf_energetics = SCFEnergeticsXML(parent_object = root_object, \
definition = scf_energetics_definition)
scf_energetics.root = scf_energetics.retrieve_path(scf_energetics_filename, scf_energetics.directory)
root_object.children.append(scf_energetics)
root_object.child_definitions.append('scf_energetics')
root_object.add_counters(scf_energetics)
scf_energetics.parse()
scf_energetics_id_definition = self.get_parameter_definition('scf_energetics_id')
self.set_parameter_value(scf_energetics_id_definition, scf_energetics.id)
structure_filename = \
os.path.join(self.parameter_values['output_folder'], "structure.xml")
# if structure file exists, parse it and add it as a child of the root
# and set it as the input structure of the action
if os.path.exists(os.path.join(self.directory, structure_filename)):
structure_definition = root_object.definition.find('structure_input')
structure = StructureXML(parent_object = root_object, \
definition = structure_definition)
structure.root = structure.retrieve_path(structure_filename, structure.directory)
root_object.children.append(structure)
root_object.child_definitions.append('structure')
root_object.add_counters(structure)
structure.parse()
structure_id_definition = self.get_parameter_definition('structure_id')
self.set_parameter_value(structure_id_definition, structure.id)
class BasisSetXML(InputXML):
tag_type = 'basis_set'
definition_tag = 'basis_set_input'
class SettingsXML(InputXML):
tag_type = 'settings'
definition_tag = 'settings_input'
class StructureXML(InputXML):
tag_type = 'structure'
definition_tag = 'structure_input'
atom_types = {'H':1, 'He':2, 'Li':3, 'Be':4, 'B':5, 'C':6, 'N':7, 'O':8, 'F':9, 'Ne':10, 'Na': 11, 'Mg':12, 'Al':13, 'Si':14, 'P':15, 'S':16, 'Cl':17, 'Ar':18}
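    # element symbol -> atomic number; used both as the integer atom type and
    # (as a float) as the nuclear charge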
def read_input(self):
charge = self.root.find('charge')
# read relative charge
if (charge is not None):
self.charge = int(charge.text)
else:
self.charge = 0
# read coordinates and atom types
self.coordinates = []
self.types = []
self.charges = []
# first read atom coordinates in 'atom' tags
for i, atom in enumerate(self.root.findall('atom')):
self.read_atom_coordinates_and_type(atom)
# then read atoms in 'atoms' tags
for i, atoms in enumerate(self.root.findall('atoms')):
self.read_atoms_coordinates_and_types(atoms)
def read_atom_coordinates_and_type(self, atom):
result = [0.0, 0.0, 0.0]
x = atom.find('x')
if (x is not None):
result[0] = float(x.text)
y = atom.find('y')
if (y is not None):
result[1] = float(y.text)
z = atom.find('z')
if (z is not None):
result[2] = float(z.text)
        xyz = atom.find('xyz')
        # type comes from the 'type' attribute; an 'xyz' line may override it
        type_text = self.read_atom_type(atom)
        atom_type = self.get_atom_type(type_text)
        atom_charge = self.get_atom_charge(type_text)
        if (xyz is not None):
            xyz_text = xyz.text.strip().split(" ")
            if (len(xyz_text) == 4):
                atom_type = self.get_atom_type(xyz_text[0])
                atom_charge = self.get_atom_charge(xyz_text[0])
                result[0] = float(xyz_text[1])
                result[1] = float(xyz_text[2])
                result[2] = float(xyz_text[3])
            else:
                sys.exit("Error: Too many or too few coordinates in 'atom'->'xyz' -tag.")
        self.coordinates.append(result)
        self.types.append(atom_type)
        self.charges.append(atom_charge)
def get_atom_type(self, atom_type_text):
return int(self.atom_types[atom_type_text])
def get_atom_charge(self, atom_type_text):
return float(self.atom_types[atom_type_text])
def read_atom_type(self, atom):
if 'type' in atom.attrib:
return atom.attrib['type']
else:
sys.exit("Error: The mandatory attribute 'type' not found in 'atom'-tag")
def read_atoms_coordinates_and_types(self, atoms):
xyz = atoms.find('xyz')
coordinates = []
types = []
charges = []
if (xyz is not None):
xyz_lines = xyz.text.splitlines()
for xyz in xyz_lines:
xyz_text = xyz.strip().split(" ")
xyz_coord = [0.0, 0.0, 0.0]
# ignore empty lines
if (len(xyz_text) == 1 and xyz_text[0] == ""):
continue
elif (len(xyz_text) == 4):
types.append(self.get_atom_type(xyz_text[0]))
charges.append(self.get_atom_charge(xyz_text[0]))
xyz_coord[0] = float(xyz_text[1])
xyz_coord[1] = float(xyz_text[2])
xyz_coord[2] = float(xyz_text[3])
coordinates.append(xyz_coord)
else:
sys.exit("Error: Too many or too few coordinates in 'atoms'->'xyz' -line.")
self.coordinates.extend(coordinates)
self.types.extend(types)
self.charges.extend(charges)
if __name__ == "__main__":
if len(sys.argv) <= 1:
print("Give the input file name as an input.")
else:
inp = InputXML(filename = sys.argv[1], definition_filename = os.path.dirname(os.path.realpath(__file__))+"/input_parameters.xml")
import dage_fortran
dage_fortran.python_interface.run(**inp.prepare())
| [((96, 17, 96, 30), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((110, 25, 110, 55), 'os.path.dirname', 'os.path.dirname', ({(110, 41, 110, 54): 'complete_path'}, {}), '(complete_path)', False, 'import os\n'), ((130, 11, 130, 40), 'os.path.exists', 'os.path.exists', ({(130, 26, 130, 39): 'complete_path'}, {}), '(complete_path)', False, 'import os\n'), ((281, 32, 281, 45), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((282, 37, 282, 50), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((106, 28, 106, 71), 'os.path.join', 'os.path.join', ({(106, 41, 106, 59): 'original_directory', (106, 61, 106, 70): 'path_text'}, {}), '(original_directory, path_text)', False, 'import os\n'), ((113, 15, 113, 45), 'os.path.exists', 'os.path.exists', ({(113, 30, 113, 44): 'directory_path'}, {}), '(directory_path)', False, 'import os\n'), ((125, 28, 125, 62), 'os.path.join', 'os.path.join', ({(125, 41, 125, 50): 'directory', (125, 52, 125, 61): 'path_text'}, {}), '(directory, path_text)', False, 'import os\n'), ((131, 19, 131, 42), 'xml.etree.ElementTree.parse', 'ET.parse', ({(131, 28, 131, 41): 'complete_path'}, {}), '(complete_path)', True, 'import xml.etree.ElementTree as ET\n'), ((618, 25, 618, 63), 'ast.literal_eval', 'ast.literal_eval', ({(618, 42, 618, 62): "'{' + value_text + '}'"}, {}), "('{' + value_text + '}')", False, 'import numpy, ast\n'), ((754, 16, 754, 90), 'os.path.join', 'os.path.join', ({(754, 29, 754, 67): "self.parameter_values['output_folder']", (754, 69, 754, 89): '"""scf_energetics.xml"""'}, {}), "(self.parameter_values['output_folder'], 'scf_energetics.xml')", False, 'import os\n'), ((773, 16, 773, 85), 'os.path.join', 'os.path.join', ({(773, 29, 773, 67): "self.parameter_values['output_folder']", (773, 69, 773, 84): '"""structure.xml"""'}, {}), "(self.parameter_values['output_folder'], 'structure.xml')", False, 'import os\n'), ((869, 12, 869, 85), 'sys.exit', 'sys.exit', ({(869, 21, 869, 84): '"""Error: The mandatory attribute \'type\' not found in \'atom\'-tag"""'}, {}), '("Error: The mandatory attribute \'type\' not found in \'atom\'-tag")', False, 'import sys\n'), ((51, 15, 51, 39), 'os.path.exists', 'os.path.exists', ({(51, 30, 51, 38): 'filename'}, {}), '(filename)', False, 'import os\n'), ((62, 38, 62, 62), 'os.path.exists', 'os.path.exists', ({(62, 53, 62, 61): 'filename'}, {}), '(filename)', False, 'import os\n'), ((63, 29, 63, 54), 'os.path.dirname', 'os.path.dirname', ({(63, 45, 63, 53): 'filename'}, {}), '(filename)', False, 'import os\n'), ((72, 15, 72, 50), 'os.path.exists', 'os.path.exists', ({(72, 30, 72, 49): 'definition_filename'}, {}), '(definition_filename)', False, 'import os\n'), ((651, 24, 651, 41), 'numpy.zeros', 'numpy.zeros', ({(651, 36, 651, 40): 'size'}, {}), '(size)', False, 'import numpy, ast\n'), ((759, 30, 759, 83), 'os.path.join', 'os.path.join', ({(759, 43, 759, 57): 'self.directory', (759, 59, 759, 82): 'scf_energetics_filename'}, {}), '(self.directory, scf_energetics_filename)', False, 'import os\n'), ((777, 30, 777, 78), 'os.path.join', 'os.path.join', ({(777, 43, 777, 57): 'self.directory', (777, 59, 777, 77): 'structure_filename'}, {}), '(self.directory, structure_filename)', False, 'import os\n'), ((853, 16, 853, 89), 'sys.exit', 'sys.exit', ({(853, 25, 853, 88): '"""Error: Too many or too few coordinates in \'atom\'->\'xyz\' -tag."""'}, {}), '("Error: Too many or too few coordinates in \'atom\'->\'xyz\' -tag.")', False, 'import sys\n'), ((52, 28, 52, 46), 'xml.etree.ElementTree.parse', 'ET.parse', ({(52, 37, 52, 45): 'filename'}, {}), '(filename)', True, 'import xml.etree.ElementTree as ET\n'), ((73, 29, 73, 58), 'xml.etree.ElementTree.parse', 'ET.parse', ({(73, 38, 73, 57): 'definition_filename'}, {}), '(definition_filename)', True, 'import xml.etree.ElementTree as ET\n'), ((84, 12, 84, 55), 'sys.exit', 'sys.exit', ({(84, 21, 84, 54): '"""Definition tag input not given."""'}, {}), "('Definition tag input not given.')", False, 'import sys\n'), ((631, 25, 631, 63), 'ast.literal_eval', 'ast.literal_eval', ({(631, 42, 631, 62): "'[' + value_text + ']'"}, {}), "('[' + value_text + ']')", False, 'import numpy, ast\n'), ((82, 16, 82, 106), 'sys.exit', 'sys.exit', ({(82, 25, 82, 84): '"""Definition tag \'{}\' not found from parent definition tree"""', (82, 86, 82, 105): 'self.definition_tag'}, {}), '("Definition tag \'{}\' not found from parent definition tree", self.\n definition_tag)', False, 'import sys\n'), ((328, 27, 328, 79), 'os.path.isabs', 'os.path.isabs', ({(328, 41, 328, 78): 'self.parameter_values[parameter_name]'}, {}), '(self.parameter_values[parameter_name])', False, 'import os\n'), ((330, 31, 330, 98), 'os.path.join', 'os.path.join', ({(330, 44, 330, 58): 'self.directory', (330, 60, 330, 97): 'self.parameter_values[parameter_name]'}, {}), '(self.directory, self.parameter_values[parameter_name])', False, 'import os\n'), ((333, 64, 333, 86), 'os.path.normpath', 'os.path.normpath', ({(333, 81, 333, 85): 'path'}, {}), '(path)', False, 'import os\n'), ((338, 24, 338, 74), 'os.makedirs', 'os.makedirs', ({(338, 36, 338, 73): 'self.parameter_values[parameter_name]'}, {}), '(self.parameter_values[parameter_name])', False, 'import os\n'), ((394, 52, 394, 80), 'numpy.array', 'numpy.array', (), '', False, 'import numpy, ast\n'), ((405, 32, 405, 65), 'numpy.empty', 'numpy.empty', (), '', False, 'import numpy, ast\n'), ((892, 20, 892, 95), 'sys.exit', 'sys.exit', ({(892, 29, 892, 94): '"""Error: Too many or too few coordinates in \'atoms\'->\'xyz\' -line."""'}, {}), '("Error: Too many or too few coordinates in \'atoms\'->\'xyz\' -line.")', False, 'import sys\n'), ((901, 85, 901, 111), 'os.path.realpath', 'os.path.realpath', ({(901, 102, 901, 110): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((50, 54, 50, 80), 'os.path.realpath', 'os.path.realpath', ({(50, 71, 50, 79): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((337, 65, 337, 118), 'os.path.exists', 'os.path.exists', ({(337, 80, 337, 117): 'self.parameter_values[parameter_name]'}, {}), '(self.parameter_values[parameter_name])', False, 'import os\n'), ((396, 33, 396, 86), 'numpy.array', 'numpy.array', (), '', False, 'import numpy, ast\n')]
MacHu-GWU/pathlib_mate-project | tests/test_mate_hashes_methods.py | 5b8f5441e681730d02209211cce7f46986147418 | # -*- coding: utf-8 -*-
import pytest
from pathlib_mate.pathlib2 import Path
class TestHashesMethods(object):
def test(self):
p = Path(__file__)
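        # this test module is far smaller than the 1 MiB partial-hash window,
        # so each partial digest should equal its full-file counterpart and the
        # set collapses to exactly three values (md5, sha256, sha512)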
assert len({
p.md5, p.get_partial_md5(nbytes=1 << 20),
p.sha256, p.get_partial_sha256(nbytes=1 << 20),
p.sha512, p.get_partial_sha512(nbytes=1 << 20),
}) == 3
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
| [((20, 15, 20, 41), 'os.path.basename', 'os.path.basename', ({(20, 32, 20, 40): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((21, 4, 21, 48), 'pytest.main', 'pytest.main', ({(21, 16, 21, 47): "[basename, '-s', '--tb=native']"}, {}), "([basename, '-s', '--tb=native'])", False, 'import pytest\n'), ((9, 12, 9, 26), 'pathlib_mate.pathlib2.Path', 'Path', ({(9, 17, 9, 25): '__file__'}, {}), '(__file__)', False, 'from pathlib_mate.pathlib2 import Path\n')] |
shoes22/openpilot | tools/lib/auth.py | a965de3c96a53b67d106cfa775e3407db82dd0e1 | #!/usr/bin/env python3
"""
Usage::
usage: auth.py [-h] [{google,apple,github,jwt}] [jwt]
Login to your comma account
positional arguments:
{google,apple,github,jwt}
jwt
optional arguments:
-h, --help show this help message and exit
Examples::
./auth.py # Log in with google account
./auth.py github # Log in with GitHub Account
./auth.py jwt ey......hw # Log in with a JWT from https://jwt.comma.ai, for use in CI
"""
import argparse
import sys
import pprint
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any, Dict
from urllib.parse import parse_qs, urlencode
from tools.lib.api import APIError, CommaApi, UnauthorizedError
from tools.lib.auth_config import set_token, get_token
PORT = 3000
class ClientRedirectServer(HTTPServer):
query_params: Dict[str, Any] = {}
class ClientRedirectHandler(BaseHTTPRequestHandler):
def do_GET(self):
if not self.path.startswith('/auth'):
self.send_response(204)
return
query = self.path.split('?', 1)[-1]
query = parse_qs(query, keep_blank_values=True)
self.server.query_params = query
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(b'Return to the CLI to continue')
def log_message(self, format, *args): # pylint: disable=redefined-builtin
pass # this prevent http server from dumping messages to stdout
def auth_redirect_link(method):
provider_id = {
'google': 'g',
'apple': 'a',
'github': 'h',
}[method]
params = {
'redirect_uri': f"https://api.comma.ai/v2/auth/{provider_id}/redirect/",
'state': f'service,localhost:{PORT}',
}
if method == 'google':
params.update({
'type': 'web_server',
'client_id': '45471411055-ornt4svd2miog6dnopve7qtmh5mnu6id.apps.googleusercontent.com',
'response_type': 'code',
'scope': 'https://www.googleapis.com/auth/userinfo.email',
'prompt': 'select_account',
})
return 'https://accounts.google.com/o/oauth2/auth?' + urlencode(params)
elif method == 'github':
params.update({
'client_id': '28c4ecb54bb7272cb5a4',
'scope': 'read:user',
})
return 'https://github.com/login/oauth/authorize?' + urlencode(params)
elif method == 'apple':
params.update({
'client_id': 'ai.comma.login',
'response_type': 'code',
'response_mode': 'form_post',
'scope': 'name email',
})
return 'https://appleid.apple.com/auth/authorize?' + urlencode(params)
else:
raise NotImplementedError(f"no redirect implemented for method {method}")
def login(method):
oauth_uri = auth_redirect_link(method)
web_server = ClientRedirectServer(('localhost', PORT), ClientRedirectHandler)
print(f'To sign in, use your browser and navigate to {oauth_uri}')
webbrowser.open(oauth_uri, new=2)
while True:
web_server.handle_request()
if 'code' in web_server.query_params:
break
elif 'error' in web_server.query_params:
print('Authentication Error: "%s". Description: "%s" ' % (
web_server.query_params['error'],
web_server.query_params.get('error_description')), file=sys.stderr)
break
try:
auth_resp = CommaApi().post('v2/auth/', data={'code': web_server.query_params['code'], 'provider': web_server.query_params['provider']})
set_token(auth_resp['access_token'])
except APIError as e:
print(f'Authentication Error: {e}', file=sys.stderr)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Login to your comma account')
parser.add_argument('method', default='google', const='google', nargs='?', choices=['google', 'apple', 'github', 'jwt'])
parser.add_argument('jwt', nargs='?')
args = parser.parse_args()
if args.method == 'jwt':
if args.jwt is None:
print("method JWT selected, but no JWT was provided")
exit(1)
set_token(args.jwt)
else:
login(args.method)
try:
me = CommaApi(token=get_token()).get('/v1/me')
print("Authenticated!")
pprint.pprint(me)
except UnauthorizedError:
print("Got invalid JWT")
exit(1)
| [((105, 2, 105, 35), 'webbrowser.open', 'webbrowser.open', (), '', False, 'import webbrowser\n'), ((125, 11, 125, 77), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((49, 12, 49, 51), 'urllib.parse.parse_qs', 'parse_qs', (), '', False, 'from urllib.parse import parse_qs, urlencode\n'), ((119, 4, 119, 40), 'tools.lib.auth_config.set_token', 'set_token', ({(119, 14, 119, 39): "auth_resp['access_token']"}, {}), "(auth_resp['access_token'])", False, 'from tools.lib.auth_config import set_token, get_token\n'), ((135, 4, 135, 23), 'tools.lib.auth_config.set_token', 'set_token', ({(135, 14, 135, 22): 'args.jwt'}, {}), '(args.jwt)', False, 'from tools.lib.auth_config import set_token, get_token\n'), ((142, 4, 142, 21), 'pprint.pprint', 'pprint.pprint', ({(142, 18, 142, 20): 'me'}, {}), '(me)', False, 'import pprint\n'), ((81, 58, 81, 75), 'urllib.parse.urlencode', 'urlencode', ({(81, 68, 81, 74): 'params'}, {}), '(params)', False, 'from urllib.parse import parse_qs, urlencode\n'), ((87, 57, 87, 74), 'urllib.parse.urlencode', 'urlencode', ({(87, 67, 87, 73): 'params'}, {}), '(params)', False, 'from urllib.parse import parse_qs, urlencode\n'), ((118, 16, 118, 26), 'tools.lib.api.CommaApi', 'CommaApi', ({}, {}), '()', False, 'from tools.lib.api import APIError, CommaApi, UnauthorizedError\n'), ((95, 59, 95, 76), 'urllib.parse.urlencode', 'urlencode', ({(95, 69, 95, 75): 'params'}, {}), '(params)', False, 'from urllib.parse import parse_qs, urlencode\n'), ((140, 24, 140, 35), 'tools.lib.auth_config.get_token', 'get_token', ({}, {}), '()', False, 'from tools.lib.auth_config import set_token, get_token\n')] |
IgorRidanovic/flapi | datedfolder.py | 7eb35cc670a5d1a06b01fb13982ffa63345369de | #! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Create a Baselight folder with current date and time stamp.
You must refresh the Job Manager after running the script.
Copyright (c) 2020 Igor Riđanović, Igor [at] hdhead.com, www.metafide.com
'''
import flapi
from getflapi import getflapi
from datetime import datetime
def make_dated_folder(ip, scene, foldername):
conn, msg = getflapi()
jobman = conn.JobManager
stamp = datetime.now().strftime('_%d-%b-%Y_%H.%M.%S')
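    # e.g. 'MyFolder_01-Jan-2020_12.00.00'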
    try:
        jobman.create_folder(ip, scene, foldername + stamp)
    except flapi.FLAPIException:
        print 'Could not create a folder.'
        return False
    finally:
        # Cleanup: close the session whether or not the folder was created
        conn.close()
    return True
if __name__=='__main__':
conn, msg = getflapi()
print msg + '\n'
ip = 'localhost'
currentScene = 'Test01'
folderName = 'MyFolder'
make_dated_folder(ip, currentScene, folderName)
| [] |
zhidou2/PyElastica | elastica/wrappers/callbacks.py | 0f5502bc5349ab5e5dc794d8dfc82b7c2bd69eb6 | __doc__ = """
CallBacks
-----------
Provides the callBack interface to collect data over time (see `callback_functions.py`).
"""
from elastica.callback_functions import CallBackBaseClass
class CallBacks:
"""
CallBacks class is a wrapper for calling callback functions, set by the user. If the user
wants to collect data from the simulation, the simulator class has to be derived
from the CallBacks class.
Attributes
----------
_callbacks: list
List of call back classes defined for rod-like objects.
"""
def __init__(self):
self._callbacks = []
super(CallBacks, self).__init__()
def collect_diagnostics(self, system):
"""
This method calls user-defined call-back classes for a
user-defined system or rod-like object. You need to input the
system or rod-like object that you want to collect data from.
Parameters
----------
system: object
System is a rod-like object.
Returns
-------
"""
sys_idx = self._get_sys_idx_if_valid(system)
# Create _Constraint object, cache it and return to user
_callbacks = _CallBack(sys_idx)
self._callbacks.append(_callbacks)
return _callbacks
def _finalize(self):
# From stored _CallBack objects, instantiate the boundary conditions
# inplace : https://stackoverflow.com/a/1208792
# dev : the first index stores the rod index to collect data.
# Technically we can use another array but it its one more book-keeping
# step. Being lazy, I put them both in the same array
self._callbacks[:] = [
(callback.id(), callback(self._systems[callback.id()]))
for callback in self._callbacks
]
# Sort from lowest id to highest id for potentially better memory access
# _callbacks contains list of tuples. First element of tuple is rod number and
# following elements are the type of boundary condition such as
# [(0, MyCallBack), (1, MyVelocityCallBack), ... ]
# Thus using lambda we iterate over the list of tuples and use rod number (x[0])
# to sort callbacks.
self._callbacks.sort(key=lambda x: x[0])
self._callBack(time=0.0, current_step=0)
# TODO: same as above naming of _callBack function
def _callBack(self, time, current_step: int, *args, **kwargs):
for sys_id, callback in self._callbacks:
callback.make_callback(
self._systems[sys_id], time, current_step, *args, **kwargs
)
class _CallBack:
"""
CallBack wrapper private class
Attributes
----------
_sys_idx: rod object index
_callback_cls: list
*args
Variable length argument list.
**kwargs
Arbitrary keyword arguments.
"""
def __init__(self, sys_idx: int):
"""
Parameters
----------
sys_idx: int
"""
self._sys_idx = sys_idx
self._callback_cls = None
self._args = ()
self._kwargs = {}
def using(self, callback_cls, *args, **kwargs):
"""
This method is a wrapper to set which callback class is used to collect data
from user defined rod-like object.
Parameters
----------
callback_cls: object
User defined callback class.
*args
Variable length argument list
**kwargs
Arbitrary keyword arguments.
Returns
-------
"""
assert issubclass(
callback_cls, CallBackBaseClass
), "{} is not a valid call back. Did you forget to derive from CallBackClass?".format(
callback_cls
)
self._callback_cls = callback_cls
self._args = args
self._kwargs = kwargs
return self
def id(self):
return self._sys_idx
def __call__(self, *args, **kwargs):
"""Constructs a callback functions after checks
Parameters
----------
args
kwargs
Returns
-------
"""
if not self._callback_cls:
raise RuntimeError(
"No callback provided to act on rod id {0}"
"but a callback was registered. Did you forget to call"
"the `using` method".format(self.id())
)
try:
return self._callback_cls(*self._args, **self._kwargs)
except (TypeError, IndexError):
raise TypeError(
r"Unable to construct callback class.\n"
r"Did you provide all necessary callback properties?"
)
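

# Minimal usage sketch (illustrative only -- ``BaseSystemCollection`` and
# ``MyCallBack`` are assumed names, not defined in this module):
#
#     class Simulator(BaseSystemCollection, CallBacks):
#         pass
#
#     simulator = Simulator()
#     simulator.collect_diagnostics(rod).using(MyCallBack, step_skip=100)
#
# ``MyCallBack`` must derive from CallBackBaseClass (see callback_functions.py).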
| [] |
HoonMinJeongUm/Hunmin-vitrage | vitrage/evaluator/template_data.py | 37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6 | # Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
ActionSpecs = namedtuple(
'ActionSpecs', ['id', 'type', 'targets', 'properties'])
EdgeDescription = namedtuple('EdgeDescription', ['edge', 'source', 'target'])
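# EdgeDescription bundles a template edge together with its source and target
# entity descriptions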
ENTITY = 'entity'
RELATIONSHIP = 'relationship'
class Scenario(object):
def __init__(self, id, version, condition, actions, subgraphs, entities,
relationships, enabled=False):
self.id = id
self.version = version
self.condition = condition
self.actions = actions
self.subgraphs = subgraphs
self.entities = entities
self.relationships = relationships
self.enabled = enabled
def __eq__(self, other):
return self.id == other.id and \
self.condition == other.condition and \
self.actions == other.actions and \
self.subgraphs == other.subgraphs and \
self.entities == other.entities and \
self.relationships == other.relationships
# noinspection PyAttributeOutsideInit
class TemplateData(object):
def __init__(self, name, template_type, version, entities,
relationships, scenarios):
self.name = name
self.template_type = template_type
self.version = version
self.entities = entities
self.relationships = relationships
self.scenarios = scenarios
@property
def name(self):
return self._name
@name.setter
def name(self, template_name):
self._name = template_name
@property
def template_type(self):
return self._template_type
@template_type.setter
def template_type(self, template_type):
self._template_type = template_type
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def entities(self):
return self._entities
@entities.setter
def entities(self, entities):
self._entities = entities
@property
def relationships(self):
return self._relationships
@relationships.setter
def relationships(self, relationships):
self._relationships = relationships
@property
def scenarios(self):
return self._scenarios
@scenarios.setter
def scenarios(self, scenarios):
self._scenarios = scenarios
| [((18, 14, 19, 59), 'collections.namedtuple', 'namedtuple', ({(19, 4, 19, 17): '"""ActionSpecs"""', (19, 19, 19, 58): "['id', 'type', 'targets', 'properties']"}, {}), "('ActionSpecs', ['id', 'type', 'targets', 'properties'])", False, 'from collections import namedtuple\n'), ((20, 18, 20, 77), 'collections.namedtuple', 'namedtuple', ({(20, 29, 20, 46): '"""EdgeDescription"""', (20, 48, 20, 76): "['edge', 'source', 'target']"}, {}), "('EdgeDescription', ['edge', 'source', 'target'])", False, 'from collections import namedtuple\n')] |
rpetit3/anthrax-metagenome-study | scripts/summarize-kmer-counts.py | b4a6f2c4d49b57aeae898afd6a95c8f6cb437945 | #! /usr/bin/env python3
"""Parse through the simulated sequencing group specific kmer counts."""
import argparse as ap
from collections import OrderedDict
import glob
import gzip
import os
import numpy as np
SAMPLES = OrderedDict()
KMERS = {}
HAMMING = OrderedDict()
SAMPLE_COLS = [
'sample', 'is_bcg', 'is_ba', 'has_lethal', 'simulated_coverage', 'group',
'total_kmers', 'tp', 'tn', 'fp', 'fn',
'kmer_cov_min', 'kmer_cov_mean', 'kmer_cov_median', 'kmer_cov_max',
'non_zero_kmer_cov_min', 'non_zero_kmer_cov_mean',
'non_zero_kmer_cov_median', 'non_zero_kmer_cov_max'
]
KMER_COLS = [
'kmer', 'simulated_coverage', 'group', 'hamming_distance',
'tp', 'tn', 'fp', 'fn',
'group_kmer_cov_min',
'group_kmer_cov_mean',
'group_kmer_cov_median',
'group_kmer_cov_max',
'non_zero_group_kmer_cov_min',
'non_zero_group_kmer_cov_mean',
'non_zero_group_kmer_cov_median',
'non_zero_group_kmer_cov_max',
'outgroup_kmer_cov_min',
'outgroup_kmer_cov_mean',
'outgroup_kmer_cov_median',
'outgroup_kmer_cov_max',
'non_zero_outgroup_kmer_cov_min',
'non_zero_outgroup_kmer_cov_mean',
'non_zero_outgroup_kmer_cov_median',
'non_zero_outgroup_kmer_cov_max'
]
def get_group_status(sample, group):
"""Return if a sample is within a group or not."""
within_group = None
if group == 'ba':
within_group = True if SAMPLES[sample]['is_ba'] == 'True' else False
elif group == 'bcg':
within_group = True if SAMPLES[sample]['is_bcg'] == 'True' else False
else:
# lef
within_group = True if SAMPLES[sample]['has_lethal'] else False
return within_group
def get_coverage_stats(coverage):
"""Return summary stats of a set of coverages."""
non_zero = [c for c in coverage if c]
np_array = np.array(coverage)
non_zero_array = np.array(non_zero)
return {
'min': min(coverage) if coverage else 0,
'median': int(np.median(np_array)) if coverage else 0,
'mean': "{0:.4f}".format(np.mean(np_array)) if coverage else 0,
'max': max(coverage) if coverage else 0,
'non_zero_min': min(non_zero_array) if non_zero else 0,
'non_zero_median': int(np.median(non_zero_array)) if non_zero else 0,
'non_zero_mean': int(round(np.mean(non_zero_array))) if non_zero else 0,
'non_zero_max': max(non_zero_array) if non_zero else 0,
}
def reverse_complement(seq):
"""Reverse complement a DNA sequence."""
complement = {
'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G',
'a': 't', 't': 'a', 'g': 'c', 'c': 'g'
}
return ''.join([complement[b] for b in seq[::-1]])
def parse_counts(counts, sample, coverage, group, skip_kmers=False,
filter_kmers=False):
"""Parse kmer counts."""
within_group = get_group_status(sample, group)
sample_row = {'coverages': [], 'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0}
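    # confusion-matrix bookkeeping: a group-specific kmer seen (count > 0) in an
    # in-group sample is a true positive, seen in an out-group sample a false
    # positive; unseen kmers count as false negatives / true negatives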
with gzip.open(counts, 'r') as count_handle:
for line in count_handle:
kmer, count = line.decode().rstrip().split()
count = int(count)
parse = True
if filter_kmers:
parse = kmer in KMERS or reverse_complement(kmer) in KMERS
elif not skip_kmers:
if kmer not in KMERS:
kmer = reverse_complement(kmer)
if within_group:
KMERS[kmer][coverage]['group_coverages'].append(count)
if count:
KMERS[kmer][coverage]['tp'] += 1
else:
KMERS[kmer][coverage]['fn'] += 1
else:
KMERS[kmer][coverage]['outgroup_coverages'].append(count)
if count:
KMERS[kmer][coverage]['fp'] += 1
else:
KMERS[kmer][coverage]['tn'] += 1
if parse:
sample_row['coverages'].append(count)
if within_group:
if count:
sample_row['tp'] += 1
else:
sample_row['fn'] += 1
else:
if count:
sample_row['fp'] += 1
else:
sample_row['tn'] += 1
coverage_stats = get_coverage_stats(sample_row['coverages'])
SAMPLES[sample]['results'].append({
'simulated_coverage': coverage,
'within_group': within_group,
'tp': sample_row['tp'],
'tn': sample_row['tn'],
'fp': sample_row['fp'],
'fn': sample_row['fn'],
'kmer_cov_min': coverage_stats['min'],
'kmer_cov_mean': coverage_stats['mean'],
'kmer_cov_median': coverage_stats['median'],
'kmer_cov_max': coverage_stats['max'],
'non_zero_kmer_cov_min': coverage_stats['non_zero_min'],
'non_zero_kmer_cov_mean': coverage_stats['non_zero_mean'],
'non_zero_kmer_cov_median': coverage_stats['non_zero_median'],
'non_zero_kmer_cov_max': coverage_stats['non_zero_max'],
})
def parse_kmers(kmers, coverages, skip_kmers=False, has_hamming=True):
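    """Read group-specific kmers (FASTA headers formatted '>KMER-DISTANCE')
    and set up per-coverage count slots for each kmer."""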
with open(kmers, 'r') as kmer_handle:
for line in kmer_handle:
if line.startswith(">"):
line = line.rstrip().replace(">", "")
kmer, distance = line.split("-")
if not has_hamming:
distance = False
KMERS[kmer] = OrderedDict()
HAMMING[kmer] = distance
if not skip_kmers:
for coverage in coverages:
KMERS[kmer][coverage] = {
'group_coverages': [], 'outgroup_coverages': [],
'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0
}
def parse_summary(summary):
"""Parse Summary file."""
cols = None
with open(summary, 'r') as summary_handle:
# Column Names:
# accession, gi, is_bcg, is_ba, species, genome_size, description
for line in summary_handle:
line = line.rstrip()
if line.startswith('#'):
cols = line.replace('#', '').split('\t')
else:
row = dict(zip(cols, line.split('\t')))
SAMPLES[row['accession']] = row
if row['accession'] == 'NZ_CP009941':
# NZ_CP009941 - Bacillus cereus w/ lef on chromosome
SAMPLES[row['accession']]['has_lethal'] = True
else:
SAMPLES[row['accession']]['has_lethal'] = False
SAMPLES[row['accession']]['results'] = []
def print_sample_summary(file_output):
"""Print the final per sample summaries."""
with open(file_output, 'w') as output_handle:
output_handle.write(("\t".join(SAMPLE_COLS)))
output_handle.write("\n")
for sample in SAMPLES:
if SAMPLES[sample]['results']:
for result in SAMPLES[sample]['results']:
row = {
'sample': sample,
'is_bcg': SAMPLES[sample]['is_bcg'],
'is_ba': SAMPLES[sample]['is_ba'],
'has_lethal': SAMPLES[sample]['has_lethal'],
'simulated_coverage': result['simulated_coverage'],
'group': args.group,
'within_group': result['within_group'],
'total_kmers': total_kmers,
'tp': result['tp'],
'tn': result['tn'],
'fp': result['fp'],
'fn': result['fn'],
'kmer_cov_min': result['kmer_cov_min'],
'kmer_cov_mean': result['kmer_cov_mean'],
'kmer_cov_median': result['kmer_cov_median'],
'kmer_cov_max': result['kmer_cov_max'],
'non_zero_kmer_cov_min': result['non_zero_kmer_cov_min'],
'non_zero_kmer_cov_mean': result['non_zero_kmer_cov_mean'],
'non_zero_kmer_cov_median': result['non_zero_kmer_cov_median'],
'non_zero_kmer_cov_max': result['non_zero_kmer_cov_max']
}
output_handle.write(("\t".join([
str(row[col]) for col in SAMPLE_COLS
])))
output_handle.write("\n")
def print_kmer_summary(file_output):
"""Print the final per kmer summaries."""
with open(file_output, 'w') as output_handle:
output_handle.write(("\t".join(KMER_COLS)))
output_handle.write("\n")
for kmer, coverages in KMERS.items():
for coverage in coverages:
within_group = get_coverage_stats(
KMERS[kmer][coverage]['group_coverages']
)
outgroup = get_coverage_stats(
KMERS[kmer][coverage]['outgroup_coverages']
)
row = {
'kmer': kmer,
'simulated_coverage': coverage,
'group': args.group,
'hamming_distance': HAMMING[kmer],
'tp': KMERS[kmer][coverage]['tp'],
'tn': KMERS[kmer][coverage]['tn'],
'fp': KMERS[kmer][coverage]['fp'],
'fn': KMERS[kmer][coverage]['fn'],
'group_kmer_cov_min': within_group['min'],
'group_kmer_cov_mean': within_group['mean'],
'group_kmer_cov_median': within_group['median'],
'group_kmer_cov_max': within_group['max'],
'non_zero_group_kmer_cov_min': within_group['non_zero_min'],
'non_zero_group_kmer_cov_mean': within_group['non_zero_mean'],
'non_zero_group_kmer_cov_median': within_group['non_zero_median'],
'non_zero_group_kmer_cov_max': within_group['non_zero_max'],
'outgroup_kmer_cov_min': outgroup['min'],
'outgroup_kmer_cov_mean': outgroup['mean'],
'outgroup_kmer_cov_median': outgroup['median'],
'outgroup_kmer_cov_max': outgroup['max'],
'non_zero_outgroup_kmer_cov_min': outgroup['non_zero_min'],
'non_zero_outgroup_kmer_cov_mean': outgroup['non_zero_mean'],
'non_zero_outgroup_kmer_cov_median': outgroup['non_zero_median'],
'non_zero_outgroup_kmer_cov_max': outgroup['non_zero_max'],
}
output_handle.write(("\t".join([
str(row[col]) for col in KMER_COLS
])))
output_handle.write("\n")
def read_lines(input_file):
"""Return lines in a text file as a list."""
lines = []
with open(input_file, 'r') as input_handle:
for line in input_handle:
lines.append(line.rstrip())
return lines
def parse_filter_kmers(kmers):
with open(kmers, 'r') as kmer_handle:
for line in kmer_handle:
if line.startswith(">"):
line = line.rstrip().replace(">", "")
KMERS[line.split("-")[0]] = True
if __name__ == '__main__':
parser = ap.ArgumentParser(
prog='summarize-kmer-counts.py', conflict_handler='resolve',
description=("Summarize kmer counts of each simulation.")
)
parser.add_argument('summary', type=str, metavar="SUMMARY",
help='Summary of Bacillus genomes.')
parser.add_argument('directory', type=str, metavar="SIMUALTION_DIR",
help='Directory with group specific 31-mer counts.')
parser.add_argument('group', type=str, metavar="GROUP",
help='Which group to parse (ba, bcg or lef).')
parser.add_argument('kmers', type=str, metavar="KMERS",
help='Group specific k-mers.')
parser.add_argument('coverages', type=str, metavar="COVERAGES",
help=('Coverages to subsample to.'))
parser.add_argument('outdir', type=str, metavar="OUTDIR",
help='Directory to output to.')
parser.add_argument('--cpu', default=1, type=int, metavar="INT",
help='Number of cores to use (Default: 1)')
parser.add_argument('--single_sample', type=str, metavar="STR",
help='Process a single sample.')
parser.add_argument('--skip_kmers', action='store_true', default=False,
help='Skip kmer processing.')
parser.add_argument('--filter', action='store_true', default=False,
help='Filter counts based on input kmers.')
args = parser.parse_args()
if args.group not in ['ba', 'bcg', 'lef']:
raise Exception("GROUPS must be 'ba', 'bcg' or 'lef'")
coverages = read_lines(args.coverages)
print("Parsing Summary")
parse_summary(args.summary)
print("Parsing Kmers")
if args.filter:
print("Filtering Kmers")
args.skip_kmers = True
parse_filter_kmers(args.kmers)
else:
print("Parsing Kmers")
parse_kmers(args.kmers, coverages, skip_kmers=args.skip_kmers,
has_hamming=False if args.group == 'lef' else True)
total_kmers = len(KMERS)
current = 1
samples = list(SAMPLES.keys())
if args.single_sample:
samples = [args.single_sample]
total = len(samples)
for sample in samples:
path = "{0}/{1}".format(args.directory, sample)
if os.path.exists(path):
print("Working on {0} ({1} of {2})".format(sample, current, total))
current += 1
count_files = sorted(glob.glob(
"{0}/*-{1}.txt.gz".format(path, args.group)
))
for count_file in count_files:
coverage = os.path.basename(count_file).split('-')[1]
parse_counts(count_file, sample, coverage, args.group,
skip_kmers=args.skip_kmers,
filter_kmers=args.filter)
print("Output sample summary")
if args.single_sample:
print_sample_summary("{0}/count-summary-{1}-{2}.txt".format(
args.outdir, args.single_sample, args.group
))
else:
print_sample_summary("{0}/count-summary-sample-{1}.txt".format(
args.outdir, args.group
))
if not args.skip_kmers:
print("Output kmer summary")
if args.single_sample:
print_kmer_summary("{0}/count-summary-kmer-{1}-{2}.txt".format(
args.outdir, args.single_sample, args.group
))
else:
print_kmer_summary("{0}/count-summary-kmer-{1}.txt".format(
args.outdir, args.group
))
| [((13, 10, 13, 23), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((15, 10, 15, 23), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((64, 15, 64, 33), 'numpy.array', 'np.array', ({(64, 24, 64, 32): 'coverage'}, {}), '(coverage)', True, 'import numpy as np\n'), ((65, 21, 65, 39), 'numpy.array', 'np.array', ({(65, 30, 65, 38): 'non_zero'}, {}), '(non_zero)', True, 'import numpy as np\n'), ((289, 13, 292, 5), 'argparse.ArgumentParser', 'ap.ArgumentParser', (), '', True, 'import argparse as ap\n'), ((92, 9, 92, 31), 'gzip.open', 'gzip.open', ({(92, 19, 92, 25): 'counts', (92, 27, 92, 30): '"""r"""'}, {}), "(counts, 'r')", False, 'import gzip\n'), ((340, 11, 340, 31), 'os.path.exists', 'os.path.exists', ({(340, 26, 340, 30): 'path'}, {}), '(path)', False, 'import os\n'), ((68, 22, 68, 41), 'numpy.median', 'np.median', ({(68, 32, 68, 40): 'np_array'}, {}), '(np_array)', True, 'import numpy as np\n'), ((69, 33, 69, 50), 'numpy.mean', 'np.mean', ({(69, 41, 69, 49): 'np_array'}, {}), '(np_array)', True, 'import numpy as np\n'), ((72, 31, 72, 56), 'numpy.median', 'np.median', ({(72, 41, 72, 55): 'non_zero_array'}, {}), '(non_zero_array)', True, 'import numpy as np\n'), ((156, 30, 156, 43), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((73, 35, 73, 58), 'numpy.mean', 'np.mean', ({(73, 43, 73, 57): 'non_zero_array'}, {}), '(non_zero_array)', True, 'import numpy as np\n'), ((347, 27, 347, 55), 'os.path.basename', 'os.path.basename', ({(347, 44, 347, 54): 'count_file'}, {}), '(count_file)', False, 'import os\n')] |
te0dor/netguru-movies | movies/exceptions.py | 8e2cc4585851ad31794ec9e6a3e4dd70cc0980c5 | from marshmallow.exceptions import ValidationError
class ObjectDoesNotExist(Exception):
"""Exception if not found results"""
pass
class CommunicationError(Exception):
"""Exception for diferents problem with communications."""
pass
__all__ = ('ValidationError', 'ObjectDoesNotExist', 'CommunicationError')
| [] |
fejiroofficial/Simple_music | music_api/apps/music_app/admin.py | 2dd9dcf8e5c7374e29dcf96987c053eebf1cba2a | from django.contrib import admin
from .models import Songs
# Register your models here.
admin.site.register(Songs)
| [((4, 0, 4, 26), 'django.contrib.admin.site.register', 'admin.site.register', ({(4, 20, 4, 25): 'Songs'}, {}), '(Songs)', False, 'from django.contrib import admin\n')] |
JIC-Image-Analysis/senescence-in-field | scripts/generate_image_series.py | f310e34df377eb807423c38cf27d1ade0782f5a2 | # Draw image time series for one or more plots
from jicbioimage.core.image import Image
import dtoolcore
import click
from translate_labels import rack_plot_to_image_plot
from image_utils import join_horizontally, join_vertically
def identifiers_where_match_is_true(dataset, match_function):
return [i for i in dataset.identifiers if match_function(i)]
def generate_image_series_for_plot(rack, plot):
n_image, n_plot = rack_plot_to_image_plot(rack, plot)
# n_image, n_plot = 55, 24
print "{}_{}".format(n_image, n_plot)
dataset_uri = 'file:/Users/hartleym/data_intermediate/separate_plots'
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
plot_number_overlay = dataset.get_overlay('plot_number')
ordering_overlay = dataset.get_overlay('ordering')
date_overlay = dataset.get_overlay('date')
def is_match(i):
try:
ordering_as_int = int(ordering_overlay[i])
except TypeError:
return False
if ordering_as_int != n_image:
return False
if int(plot_number_overlay[i]) != n_plot:
return False
return True
identifiers = identifiers_where_match_is_true(dataset, is_match)
def sort_identifiers_by_date(identifiers):
dates_and_identifiers = [(date_overlay[i], i) for i in identifiers]
sorted_dates_and_identifiers = sorted(dates_and_identifiers)
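        # unzip back to identifiers only, now ordered by date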
_, sorted_identifiers = zip(*sorted_dates_and_identifiers)
return(sorted_identifiers)
sorted_identifiers = sort_identifiers_by_date(identifiers)
def identifiers_to_joined_image(identifiers):
images = []
for identifier in identifiers:
image_fpath = dataset.item_content_abspath(identifier)
image = Image.from_file(image_fpath)
images.append(image)
return join_horizontally(images)
result = identifiers_to_joined_image(sorted_identifiers)
output_fname = 'example_from_tobin.png'
with open(output_fname, 'wb') as fh:
fh.write(result.png())
@click.command()
def main():
# Early leaf senescence
# generate_image_series_for_plot(3, 16)
# generate_image_series_for_plot(7, 9)
# generate_image_series_for_plot(9, 1)
# Late leaf senescence
generate_image_series_for_plot(7, 15)
if __name__ == '__main__':
main()
| [] |
Venafi/pytpp | pytpp/properties/response_objects/system_status.py | 42af655b2403b8c9447c86962abd4aaa0201f646 | from pytpp.properties.response_objects.dataclasses import system_status
from pytpp.tools.helpers.date_converter import from_date_string
class SystemStatus:
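    # every factory below tolerates a missing/None payload by substituting an
    # empty dict, so absent fields simply come back as None on the dataclass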
@staticmethod
def Engine(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.Engine(
dn=response_object.get('DN'),
display_name=response_object.get('DisplayName'),
guid=response_object.get('Guid'),
id=response_object.get('Id'),
name=response_object.get('Name'),
)
@staticmethod
def Services(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.Services(
vplatform=SystemStatus.Service(response_object.get('vPlatform')),
log_server=SystemStatus.Service(response_object.get('logServer')),
iis=SystemStatus.Service(response_object.get('iis')),
)
@staticmethod
def Service(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.Service(
modules=response_object.get('modules'),
time_since_first_seen=from_date_string(response_object.get('timeSinceFirstSeen'), duration_format=True),
time_since_last_seen=from_date_string(response_object.get('timeSinceLastSeen'), duration_format=True),
status=response_object.get('Status'),
)
@staticmethod
def SystemStatus(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.SystemStatus(
engine_name=response_object.get('engineName'),
services=SystemStatus.Services(response_object.get('services')),
version=response_object.get('version'),
)
@staticmethod
def Task(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.Task(
display_name=response_object.get('DisplayName'),
name=response_object.get('Name'),
start_time=from_date_string(response_object.get('StartTime')),
stop_time=from_date_string(response_object.get('StopTime')),
warning_count=response_object.get('WarningCount'),
)
@staticmethod
def UpgradeInfo(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.UpgradeInfo(
id=response_object.get('Id'),
start_time=from_date_string(response_object.get('StartTime')),
versions=response_object.get('Versions'),
)
@staticmethod
def UpgradeStatus(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.UpgradeStatus(
engine=SystemStatus.Engine(response_object.get('Engine')),
status=response_object.get('Status'),
upgrade_start_time=from_date_string(response_object.get('UpgradeStartTime')),
upgrade_stop_time=from_date_string(response_object.get('UpgradeStopTime')),
tasks_completed=[SystemStatus.Task(t) for t in response_object.get('TasksCompleted', [])],
tasks_pending=[SystemStatus.Task(t) for t in response_object.get('TasksPending', [])],
tasks_running=[SystemStatus.Task(t) for t in response_object.get('TasksRunning', [])],
)
@staticmethod
def UpgradeSummary(response_object: dict):
if not isinstance(response_object, dict):
response_object = {}
return system_status.UpgradeSummary(
status=response_object.get('Status'),
upgrade_start_time=from_date_string(response_object.get('UpgradeStartTime')),
upgrade_stop_time=from_date_string(response_object.get('UpgradeStopTime')),
completed_tasks=response_object.get('CompletedTasks'),
target_version=response_object.get('TargetVersion'),
engines_complete=response_object.get('EnginesComplete'),
engines_running=response_object.get('EnginesRunning'),
engines_blocked=response_object.get('EnginesBlocked'),
engines_in_error=response_object.get('EnginesInError'),
engines_pending_install=response_object.get('EnginesPendingInstall'),
)
| [] |
mikkelfo/Title-prediction-from-abstract | src/data/dataModule.py | 45c9b64c963ae9b00c6b34a3f2b9f7c25496350e | from typing import Optional
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, random_split
from transformers import T5Tokenizer
from src.data.PaperDataset import PaperDataset
class ArvixDataModule(pl.LightningDataModule):
def __init__(self, config: str = "src/data/config.yaml") -> None:
super().__init__()
self.config = OmegaConf.load(config)
def prepare_data(self) -> None:
# Add tokenizing
tokenizer = T5Tokenizer.from_pretrained("t5-base")
titles, abstracts = torch.load("data/processed/data.pt").T
tokenized_abstracts = tokenizer.batch_encode_plus(
abstracts, padding=True, truncation=True, return_tensors="pt"
)
tokenized_titles = tokenizer.batch_encode_plus(
titles, padding=True, truncation=True, return_tensors="pt"
)
self.data = PaperDataset(tokenized_abstracts, tokenized_titles)
def setup(self, stage: Optional[str] = None):
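        # fixed generator seed keeps the train/val/test split reproducible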
train, val, test = random_split(
self.data,
[self.config.n_train, self.config.n_val, self.config.n_test],
generator=torch.Generator().manual_seed(1337),
)
if stage == "fit" or stage is None:
self.train_set = train
self.val_set = val
if stage == "test":
self.test_set = test
def train_dataloader(self) -> DataLoader:
return DataLoader(self.train_set, batch_size=32, num_workers=4)
def val_dataloader(self) -> DataLoader:
return DataLoader(self.val_set, batch_size=32, num_workers=4)
def test_dataloader(self) -> DataLoader:
return DataLoader(self.test_set, batch_size=32, num_workers=4)
if __name__ == "__main__":
dm = ArvixDataModule()
| [((15, 22, 15, 44), 'omegaconf.OmegaConf.load', 'OmegaConf.load', ({(15, 37, 15, 43): 'config'}, {}), '(config)', False, 'from omegaconf import OmegaConf\n'), ((19, 20, 19, 58), 'transformers.T5Tokenizer.from_pretrained', 'T5Tokenizer.from_pretrained', ({(19, 48, 19, 57): '"""t5-base"""'}, {}), "('t5-base')", False, 'from transformers import T5Tokenizer\n'), ((30, 20, 30, 71), 'src.data.PaperDataset.PaperDataset', 'PaperDataset', ({(30, 33, 30, 52): 'tokenized_abstracts', (30, 54, 30, 70): 'tokenized_titles'}, {}), '(tokenized_abstracts, tokenized_titles)', False, 'from src.data.PaperDataset import PaperDataset\n'), ((47, 15, 47, 71), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, random_split\n'), ((50, 15, 50, 69), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, random_split\n'), ((53, 15, 53, 70), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, random_split\n'), ((21, 28, 21, 64), 'torch.load', 'torch.load', ({(21, 39, 21, 63): '"""data/processed/data.pt"""'}, {}), "('data/processed/data.pt')", False, 'import torch\n'), ((36, 22, 36, 39), 'torch.Generator', 'torch.Generator', ({}, {}), '()', False, 'import torch\n')] |
ansobolev/shs | shs/gui/RootFrame.py | 7a5f61bd66fe1e8ae047a4d3400b055175a53f4e | # -*- coding: utf-8 -*-
import os
import sys
import time
import subprocess
import wx
import ConfigParser
from wx.lib.mixins.listctrl import getListCtrlSelection
from wx.lib.pubsub import pub
from gui.RootGUI import RootGUI
from StepsDialog import StepsDialog
from PlotFrame import PlotFuncFrame, PlotCorrFrame
import interface
import mbox
class RootFrame(RootGUI):
calcs = []
plot_frame = None
def __init__(self, *args, **kwds):
super(RootFrame, self).__init__(*args, **kwds)
# set root
self.root = self.set_root()
# initialize choices
self.propChoices = interface.dataClasses()
calc_data_types = self.propChoices.types()
calc_data_classes = self.propChoices.classes(calc_data_types[0])
corr_classes = self.propChoices.classes("Histogram")
self.propType.SetItems(calc_data_types)
self.propChoice.SetItems(calc_data_classes)
self.xCorr.SetItems(corr_classes)
self.yCorr.SetItems(corr_classes)
self.propType.SetSelection(0)
self.propChoice.SetSelection(0)
self.xCorr.SetSelection(0)
self.yCorr.SetSelection(0)
# initialize calc tree
self.build_tree(self.root, self.typeRBox.GetItemLabel(self.typeRBox.GetSelection()))
# initialize calc list
self.calcList.InsertColumn(0, 'Directory', width=180)
self.calcList.InsertColumn(1, 'Type', width=70)
self.calcList.InsertColumn(2, 'NSteps', width=100)
def set_root(self):
"""
        Sets the root directory for the GUI based on the config file
:return: Root directory
"""
config_dir = os.path.expanduser("~/.local/shs")
config_file = os.path.join(config_dir, "shs_gui.cfg")
# check the file and create one if it's not there
if not os.path.isfile(config_file):
os.makedirs(config_dir)
open(config_file, 'w').close()
config = ConfigParser.ConfigParser()
config.read(config_file)
# if config exists and has needed option
if config.has_option("general", "root_dir"):
return config.get("general", "root_dir")
# make config
if not config.has_section("general"):
config.add_section("general")
dlg = wx.DirDialog(self, "Select root directory")
if dlg.ShowModal() == wx.ID_OK:
root_dir = dlg.GetPath()
config.set("general", "root_dir", root_dir)
else:
sys.exit(1)
with open(config_file, 'w') as f:
config.write(f)
return root_dir
def build_tree(self, root, calc_type):
"""
Adds a new root element and then its children
:param root: root directory for the tree
:param calc_type: calculation type
"""
self.calcTree.DeleteAllItems()
r = len(root.split(os.sep))
ids = {root: self.calcTree.AddRoot(root)}
for (dir_path, dir_names, file_names) in os.walk(root):
if interface.isCalcOfType(calc_type, dn=dir_names, fn=file_names):
# find the number of steps in MDE file, quickly
nsteps = interface.GetNumMDESteps(dir_path)
ancdirs = dir_path.split(os.sep)[r:]
if nsteps is not None:
ancdirs[-1] += ' [%i]' % nsteps
ad = root
for ancdir in ancdirs:
d = os.path.join(ad, ancdir)
                if d not in ids:
ids[d] = self.calcTree.AppendItem(ids[ad], ancdir)
self.calcTree.SortChildren(ids[ad])
ad = d
def get_selection_dir(self):
item = self.calcTree.GetSelection()
parent = self.calcTree.GetItemParent(item)
path = [self.calcTree.GetItemText(item)]
while parent.IsOk():
path.append(self.calcTree.GetItemText(parent))
parent = self.calcTree.GetItemParent(parent)
# calculation directory
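        # (split()[0] drops the ' [nsteps]' suffix appended in build_tree)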
calc_dir = os.sep.join(path[::-1]).split()[0]
return calc_dir
# return os.sep.join((self.root, calc_dir))
def onSelChange(self, event):
# calculation type
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
# calculation directory
cdir = self.get_selection_dir()
if interface.isCalcOfType(ctype, dir=cdir):
self.enqueueBtn.Enable()
else:
self.enqueueBtn.Enable(False)
def propTypeChange(self, event):
# property type
pt_num = self.propType.GetSelection()
pt = self.propType.GetItems()[pt_num]
self.propChoice.SetItems(self.propChoices.classes(pt))
self.propChoice.SetSelection(0)
def typeChange(self, event):
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
self.build_tree(self.root, ctype)
def upBtnPress(self, event):
# selection indices
sind = getListCtrlSelection(self.calcList)
if sind:
# number of deleted strings
ds = 0
for si in sind:
self.calcs.pop(si - ds)
self.calcList.DeleteItem(si - ds)
ds += 1
return 0
return 1
def downBtnPress(self, event):
# current list count
clc = self.calcList.GetItemCount()
# calculation type
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
# calculation directory
cdir = self.get_selection_dir()
if not interface.isCalcOfType(ctype, dir=cdir):
mbox.NoResults(cdir, ctype)
return 1
# init steps range
r = None
if ctype in ('.output', '.ANI'):
# enter dialog
dlg = StepsDialog(None)
if dlg.ShowModal() == wx.ID_OK:
r = dlg.GetRange()
dlg.Destroy()
self.calcs.append(interface.getCalc(cdir, ctype, r))
self.calcList.InsertStringItem(clc, cdir[len(self.root)+1:])
self.calcList.SetStringItem(clc, 1, ctype)
self.calcList.SetStringItem(clc, 2, str(len(r)) if r is not None else '')
return 0
def on_enqueue_press(self, _):
from sshutils import getMount, getDevice, getRemoteDir
# on which device are we?
calc_dir = self.get_selection_dir()
mount_path = getMount(calc_dir)
device_name, device_type = getDevice(mount_path)
if 'ssh' in device_type:
user, host_dir = device_name.split('@')
hostname, remote_mount_path = host_dir.split(':')
remote_dir = getRemoteDir(calc_dir, mount_path, remote_mount_path)
self.enqueue_remote(remote_dir, hostname, user)
else:
self.enqueue_local(calc_dir)
@staticmethod
def enqueue_local(calc_dir):
"""
Enqueue a task on a local filesystem
:param calc_dir: calculation directory on a local filesystem
:return: error_code (0 is OK)
"""
import distutils.spawn
# find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
if distutils.spawn.find_executable('qstat') is not None:
q = 'pbs'
elif distutils.spawn.find_executable('sinfo') is not None:
q = 'slurm'
else:
mbox.JobSubmit(None, ())
return -1
comm = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', q, q + '.sh'))
submit = subprocess.Popen(['/bin/bash', comm, '-d=' + calc_dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
mbox.JobSubmit(q, submit.communicate())
@staticmethod
def enqueue_remote(calc_dir, host, user):
"""
Enqueue a task on a remote filesystem
:param calc_dir: calculation directory on a remote filesystem
:param host: host where to enqueue a task
:param user: user of a remote system who enqueues a task
:return: error code (0 is OK)
"""
from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand
ssh = getSSHClient(host, user)
# find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
q = getQueue(ssh)
if q is None:
mbox.JobSubmit(None, ())
return None
# queue putter on a local machine
local_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', q))
putter = q + '.sh'
sftp = copyFile(ssh, putter, local_dir, calc_dir)
remote_file = os.path.join(calc_dir, putter)
stdout, stderr = runCommand(ssh, 'bash ' + remote_file + ' -d=' + calc_dir)
mbox.JobSubmit(q, ('\n'.join(stdout.readlines()), '\n'.join(stderr.readlines())))
removeFile(sftp, remote_file)
ssh.close()
def plotBtnPress(self, event):
if self.noteBook.GetSelection() == 0:
self.plot_property()
else:
self.plot_correlation()
def plot_property(self):
# plot options - get all the data to plot
ptype = self.propType.GetItems()[self.propType.GetSelection()]
pchoice = self.propChoice.GetItems()[self.propChoice.GetSelection()]
data_class = self.propChoices.dataClass(ptype, pchoice)
leg = [self.calcList.GetItemText(i) for i in getListCtrlSelection(self.calcList)]
t1 = time.clock()
plot_data = interface.getData(ptype, data_class, leg,
[self.calcs[i] for i in getListCtrlSelection(self.calcList)])
self.SetStatusText('Calculation time: %7.2f s.' % (time.clock() - t1))
msg = plot_data
try:
self.plot_frame.Raise()
except (AttributeError, wx.PyDeadObjectError):
self.plot_frame = PlotFuncFrame(self)
self.plot_frame.Show()
pub.sendMessage('data.plot', message=msg)
def plot_correlation(self):
# correlate options - get all the data to plot
xchoice = self.xCorr.GetSelection()
ychoice = self.yCorr.GetSelection()
leg = [self.calcList.GetItemText(i) for i in getListCtrlSelection(self.calcList)]
data, info = interface.getCorr(xchoice, ychoice, [self.calcs[i] for i in getListCtrlSelection(self.calcList)])
msg = [leg, data, info]
try:
self.plot_frame.Raise()
except (AttributeError, wx.PyDeadObjectError):
self.plot_frame = PlotCorrFrame(self)
self.plot_frame.Show()
pub.sendMessage('corr.plot', message=msg)
| [((30, 27, 30, 50), 'interface.dataClasses', 'interface.dataClasses', ({}, {}), '()', False, 'import interface\n'), ((54, 21, 54, 55), 'os.path.expanduser', 'os.path.expanduser', ({(54, 40, 54, 54): '"""~/.local/shs"""'}, {}), "('~/.local/shs')", False, 'import os\n'), ((55, 22, 55, 61), 'os.path.join', 'os.path.join', ({(55, 35, 55, 45): 'config_dir', (55, 47, 55, 60): '"""shs_gui.cfg"""'}, {}), "(config_dir, 'shs_gui.cfg')", False, 'import os\n'), ((60, 17, 60, 44), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ({}, {}), '()', False, 'import ConfigParser\n'), ((68, 14, 68, 57), 'wx.DirDialog', 'wx.DirDialog', ({(68, 27, 68, 31): 'self', (68, 33, 68, 56): '"""Select root directory"""'}, {}), "(self, 'Select root directory')", False, 'import wx\n'), ((87, 49, 87, 62), 'os.walk', 'os.walk', ({(87, 57, 87, 61): 'root'}, {}), '(root)', False, 'import os\n'), ((119, 11, 119, 50), 'interface.isCalcOfType', 'interface.isCalcOfType', (), '', False, 'import interface\n'), ((137, 15, 137, 50), 'wx.lib.mixins.listctrl.getListCtrlSelection', 'getListCtrlSelection', ({(137, 36, 137, 49): 'self.calcList'}, {}), '(self.calcList)', False, 'from wx.lib.mixins.listctrl import getListCtrlSelection\n'), ((176, 21, 176, 39), 'sshutils.getMount', 'getMount', ({(176, 30, 176, 38): 'calc_dir'}, {}), '(calc_dir)', False, 'from sshutils import getMount, getDevice, getRemoteDir\n'), ((177, 35, 177, 56), 'sshutils.getDevice', 'getDevice', ({(177, 45, 177, 55): 'mount_path'}, {}), '(mount_path)', False, 'from sshutils import getMount, getDevice, getRemoteDir\n'), ((204, 17, 204, 120), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((217, 14, 217, 38), 'sshutils.getSSHClient', 'getSSHClient', ({(217, 27, 217, 31): 'host', (217, 33, 217, 37): 'user'}, {}), '(host, user)', False, 'from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand\n'), ((219, 12, 219, 25), 'sshutils.getQueue', 'getQueue', ({(219, 21, 219, 24): 'ssh'}, {}), '(ssh)', False, 'from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand\n'), ((227, 15, 227, 57), 'sshutils.copyFile', 'copyFile', ({(227, 24, 227, 27): 'ssh', (227, 29, 227, 35): 'putter', (227, 37, 227, 46): 'local_dir', (227, 48, 227, 56): 'calc_dir'}, {}), '(ssh, putter, local_dir, calc_dir)', False, 'from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand\n'), ((228, 22, 228, 52), 'os.path.join', 'os.path.join', ({(228, 35, 228, 43): 'calc_dir', (228, 45, 228, 51): 'putter'}, {}), '(calc_dir, putter)', False, 'import os\n'), ((229, 25, 229, 83), 'sshutils.runCommand', 'runCommand', ({(229, 36, 229, 39): 'ssh', (229, 41, 229, 82): "'bash ' + remote_file + ' -d=' + calc_dir"}, {}), "(ssh, 'bash ' + remote_file + ' -d=' + calc_dir)", False, 'from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand\n'), ((231, 8, 231, 37), 'sshutils.removeFile', 'removeFile', ({(231, 19, 231, 23): 'sftp', (231, 25, 231, 36): 'remote_file'}, {}), '(sftp, remote_file)', False, 'from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand\n'), ((246, 13, 246, 25), 'time.clock', 'time.clock', ({}, {}), '()', False, 'import time\n'), ((256, 8, 256, 49), 'wx.lib.pubsub.pub.sendMessage', 'pub.sendMessage', (), '', False, 'from wx.lib.pubsub import pub\n'), ((270, 8, 270, 49), 'wx.lib.pubsub.pub.sendMessage', 'pub.sendMessage', (), '', False, 'from wx.lib.pubsub import pub\n'), ((57, 15, 57, 42), 'os.path.isfile', 'os.path.isfile', ({(57, 30, 57, 41): 'config_file'}, {}), '(config_file)', False, 'import os\n'), ((58, 12, 58, 35), 'os.makedirs', 'os.makedirs', ({(58, 24, 58, 34): 'config_dir'}, {}), '(config_dir)', False, 'import os\n'), ((73, 12, 73, 23), 'sys.exit', 'sys.exit', ({(73, 21, 73, 22): '(1)'}, {}), '(1)', False, 'import sys\n'), ((88, 15, 88, 77), 'interface.isCalcOfType', 'interface.isCalcOfType', (), '', False, 'import interface\n'), ((155, 15, 155, 54), 'interface.isCalcOfType', 'interface.isCalcOfType', (), '', False, 'import interface\n'), ((156, 12, 156, 39), 'mbox.NoResults', 'mbox.NoResults', ({(156, 27, 156, 31): 'cdir', (156, 33, 156, 38): 'ctype'}, {}), '(cdir, ctype)', False, 'import mbox\n'), ((162, 18, 162, 35), 'StepsDialog.StepsDialog', 'StepsDialog', ({(162, 30, 162, 34): 'None'}, {}), '(None)', False, 'from StepsDialog import StepsDialog\n'), ((166, 26, 166, 59), 'interface.getCalc', 'interface.getCalc', ({(166, 44, 166, 48): 'cdir', (166, 50, 166, 55): 'ctype', (166, 57, 166, 58): 'r'}, {}), '(cdir, ctype, r)', False, 'import interface\n'), ((181, 25, 181, 78), 'sshutils.getRemoteDir', 'getRemoteDir', ({(181, 38, 181, 46): 'calc_dir', (181, 48, 181, 58): 'mount_path', (181, 60, 181, 77): 'remote_mount_path'}, {}), '(calc_dir, mount_path, remote_mount_path)', False, 'from sshutils import getMount, getDevice, getRemoteDir\n'), ((221, 12, 221, 36), 'mbox.JobSubmit', 'mbox.JobSubmit', ({(221, 27, 221, 31): 'None', (221, 33, 221, 35): '()'}, {}), '(None, ())', False, 'import mbox\n'), ((200, 12, 200, 36), 'mbox.JobSubmit', 'mbox.JobSubmit', ({(200, 27, 200, 31): 'None', (200, 33, 200, 35): '()'}, {}), '(None, ())', False, 'import mbox\n'), ((202, 44, 202, 69), 'os.path.dirname', 'os.path.dirname', ({(202, 60, 202, 68): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((224, 49, 224, 74), 'os.path.dirname', 'os.path.dirname', ({(224, 65, 224, 73): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((245, 53, 245, 88), 'wx.lib.mixins.listctrl.getListCtrlSelection', 'getListCtrlSelection', ({(245, 74, 245, 87): 'self.calcList'}, {}), '(self.calcList)', False, 'from wx.lib.mixins.listctrl import getListCtrlSelection\n'), ((254, 30, 254, 49), 'PlotFrame.PlotFuncFrame', 'PlotFuncFrame', ({(254, 44, 254, 48): 'self'}, {}), '(self)', False, 'from PlotFrame import PlotFuncFrame, PlotCorrFrame\n'), ((262, 53, 262, 88), 'wx.lib.mixins.listctrl.getListCtrlSelection', 'getListCtrlSelection', ({(262, 74, 262, 87): 'self.calcList'}, {}), '(self.calcList)', False, 'from wx.lib.mixins.listctrl import getListCtrlSelection\n'), ((268, 30, 268, 49), 'PlotFrame.PlotCorrFrame', 'PlotCorrFrame', ({(268, 44, 268, 48): 'self'}, {}), '(self)', False, 'from PlotFrame import PlotFuncFrame, PlotCorrFrame\n'), ((96, 24, 96, 48), 'os.path.join', 'os.path.join', ({(96, 37, 96, 39): 'ad', (96, 41, 96, 47): 'ancdir'}, {}), '(ad, ancdir)', False, 'import os\n'), ((110, 19, 110, 42), 'os.sep.join', 'os.sep.join', ({(110, 31, 110, 41): 'path[::-1]'}, {}), '(path[::-1])', False, 'import os\n'), ((248, 62, 248, 97), 'wx.lib.mixins.listctrl.getListCtrlSelection', 'getListCtrlSelection', ({(248, 83, 248, 96): 'self.calcList'}, {}), '(self.calcList)', False, 'from wx.lib.mixins.listctrl import getListCtrlSelection\n'), ((249, 59, 249, 71), 'time.clock', 'time.clock', ({}, {}), '()', False, 'import time\n'), ((263, 81, 263, 116), 'wx.lib.mixins.listctrl.getListCtrlSelection', 'getListCtrlSelection', ({(263, 102, 263, 115): 'self.calcList'}, {}), '(self.calcList)', False, 'from wx.lib.mixins.listctrl import getListCtrlSelection\n')]
acabezasg/urpi-master | saleor/order/migrations/0015_auto_20170206_0407.py | 7c9cd0fbe6d89dad70652482712ca38b21ba6f84 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 10:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_prices.models
class Migration(migrations.Migration):
dependencies = [
('order', '0014_auto_20161028_0955'),
]
operations = [
migrations.AlterModelOptions(
name='deliverygroup',
options={'verbose_name': 'Delivery Group', 'verbose_name_plural': 'Delivery Groups'},
),
migrations.AlterModelOptions(
name='order',
options={'ordering': ('-last_status_change',), 'verbose_name': 'Order', 'verbose_name_plural': 'Orders'},
),
migrations.AlterModelOptions(
name='ordereditem',
options={'verbose_name': 'Ordered item', 'verbose_name_plural': 'Ordered items'},
),
migrations.AlterModelOptions(
name='orderhistoryentry',
options={'ordering': ('date',), 'verbose_name': 'Order history entry', 'verbose_name_plural': 'Order history entries'},
),
migrations.AlterModelOptions(
name='ordernote',
options={'verbose_name': 'Order note', 'verbose_name_plural': 'Order notes'},
),
migrations.AlterModelOptions(
name='payment',
options={'ordering': ('-pk',), 'verbose_name': 'Payment', 'verbose_name_plural': 'Payments'},
),
migrations.AlterField(
model_name='deliverygroup',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True, verbose_name='last updated'),
),
migrations.AlterField(
model_name='deliverygroup',
name='shipping_method_name',
field=models.CharField(blank=True, default=None, editable=False, max_length=255, null=True, verbose_name='shipping method name'),
),
migrations.AlterField(
model_name='deliverygroup',
name='tracking_number',
field=models.CharField(blank=True, default='', max_length=255, verbose_name='tracking number'),
),
migrations.AlterField(
model_name='order',
name='billing_address',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='account.Address', verbose_name='billing address'),
),
migrations.AlterField(
model_name='order',
name='discount_amount',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='discount amount'),
),
migrations.AlterField(
model_name='order',
name='discount_name',
field=models.CharField(blank=True, default='', max_length=255, verbose_name='discount name'),
),
migrations.AlterField(
model_name='order',
name='shipping_address',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='account.Address', verbose_name='shipping address'),
),
migrations.AlterField(
model_name='order',
name='total_net',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='total net'),
),
migrations.AlterField(
model_name='order',
name='total_tax',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True, verbose_name='total tax'),
),
migrations.AlterField(
model_name='order',
name='tracking_client_id',
field=models.CharField(blank=True, editable=False, max_length=36, verbose_name='tracking client id'),
),
migrations.AlterField(
model_name='order',
name='user_email',
field=models.EmailField(blank=True, default='', editable=False, max_length=254, verbose_name='user email'),
),
migrations.AlterField(
model_name='order',
name='voucher',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='discount.Voucher', verbose_name='voucher'),
),
migrations.AlterField(
model_name='ordereditem',
name='delivery_group',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='items', to='order.DeliveryGroup', verbose_name='delivery group'),
),
migrations.AlterField(
model_name='ordereditem',
name='stock',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.Stock', verbose_name='stock'),
),
migrations.AlterField(
model_name='orderhistoryentry',
name='comment',
field=models.CharField(blank=True, default='', max_length=100, verbose_name='comment'),
),
migrations.AlterField(
model_name='orderhistoryentry',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to='order.Order', verbose_name='order'),
),
migrations.AlterField(
model_name='orderhistoryentry',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
migrations.AlterField(
model_name='payment',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='payments', to='order.Order', verbose_name='order'),
),
]
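

# A hedged note (not part of the generated migration file): Django applies a
# migration like this one through its migration framework rather than by
# direct import, e.g.:
#
#     python manage.py migrate order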
| [((18, 8, 21, 9), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', (), '', False, 'from django.db import migrations, models\n'), ((22, 8, 25, 9), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', (), '', False, 'from django.db import migrations, models\n'), ((26, 8, 29, 9), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', (), '', False, 'from django.db import migrations, models\n'), ((30, 8, 33, 9), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', (), '', False, 'from django.db import migrations, models\n'), ((34, 8, 37, 9), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', (), '', False, 'from django.db import migrations, models\n'), ((38, 8, 41, 9), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', (), '', False, 'from django.db import migrations, models\n'), ((45, 18, 45, 93), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((50, 18, 50, 140), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((55, 18, 55, 106), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((60, 18, 60, 168), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((70, 18, 70, 104), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((75, 18, 75, 180), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((90, 18, 90, 112), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((95, 18, 95, 118), 'django.db.models.EmailField', 'models.EmailField', (), '', False, 'from django.db import migrations, models\n'), ((100, 18, 100, 157), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((105, 18, 105, 175), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((110, 18, 110, 134), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((115, 18, 115, 98), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((120, 18, 120, 144), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((125, 18, 125, 153), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((130, 18, 130, 145), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
tonybearpan/testrail-lib | testrail_client/api/configurations.py | 267070bd017bb1d80ac40e1b84ea40dc2c2e3956 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .base import TestRailAPIBase
class Config(TestRailAPIBase):
"""
Use the following API methods to request details
about configurations and to create or modify configurations.
"""
def __repr__(self):
return '<TestRailAPI config>'
def get(self, project_id):
"""
Returns a list of available configurations,
grouped by configuration groups (requires TestRail 3.1 or later).
:param project_id: The ID of the project
"""
return self._get('get_configs/{}'.format(project_id))
def add(self, config_group_id, name=''):
"""
Creates a new configuration (requires TestRail 5.2 or later).
:param config_group_id: The ID of the configuration group
the configuration should be added to
:param name: str, The name of the configuration (required)
"""
return self._post('add_config/{}'.format(config_group_id),
json=dict(name=name))
    def update(self, config_id, name=''):
        """
        Updates an existing configuration (requires TestRail 5.2 or later).
        :param config_id: The ID of the configuration to update
        :param name: str, The name of the configuration (required)
        """
        return self._post('update_config/{}'.format(config_id),
                          json=dict(name=name))
def delete(self, config_id):
"""
Deletes an existing configuration (requires TestRail 5.2 or later).
        :param config_id: The ID of the configuration to delete
"""
return self._post('delete_config/{}'.format(config_id))
def add_group(self, project_id, name=''):
"""
Creates a new configuration group (requires TestRail 5.2 or later).
:param project_id: The ID of the project the configuration group should be added to
:param name: The name of the configuration group (required)
"""
return self._post('add_config_group/{}'.format(project_id),
json=dict(name=name))
def update_group(self, config_group_id, name):
"""
Updates an existing configuration group (requires TestRail 5.2 or later).
:param config_group_id: The ID of the configuration group
:param name: The name of the configuration group
"""
return self._post('update_config_group/{}'.format(config_group_id),
json=dict(name=name))
def delete_group(self, config_group_id):
"""
        Deletes an existing configuration group (requires TestRail 5.2 or later).
        :param config_group_id: The ID of the configuration group to delete
"""
return self._post('delete_config_group/{}'.format(config_group_id))
def priority(self):
"""
Returns a list of available priorities.
"""
return self._get('get_priorities')
def template(self, project_id):
"""
Returns a list of available templates (requires TestRail 5.2 or later).
        :param project_id: The ID of the project
"""
return self._get('get_templates/{}'.format(project_id))
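

# A hedged usage sketch (not part of the original module). The constructor
# arguments below are assumptions; the real signature is defined by
# TestRailAPIBase elsewhere in this package:
#
#     config_api = Config(base_url='https://example.testrail.io',
#                         user='user@example.com', password='api-key')
#     groups = config_api.get(project_id=1)        # configs grouped by group
#     config_api.add_group(project_id=1, name='Browsers')
#     config_api.add(config_group_id=groups[0]['id'], name='Chrome')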
| [] |
QARancher/k8s_client | tests/asserts_wrapper.py | b290caa5db12498ed9fbb2c972ab20141ff2c401 | def assert_not_none(actual_result, message=""):
if not message:
message = f"{actual_result} resulted with None"
assert actual_result, message
def assert_equal(actual_result, expected_result, message=""):
if not message:
message = f"{actual_result} is not equal to expected " \
f"result {expected_result}"
assert actual_result == expected_result, message
def assert_in_list(searched_list, wanted_element, message=""):
if not message:
message = f"Failed to find '{wanted_element}' in list {searched_list}"
assert wanted_element in searched_list, message
def assert_not_in_list(searched_list, unwanted_element, message=""):
if not message:
message = f"'{unwanted_element}' found in list {searched_list} \n " \
f"although it should not be"
assert unwanted_element not in searched_list, message
def assert_of_type(wanted_type, wanted_object, message=""):
if not message:
message = f"{wanted_object} is not of type: {wanted_type}"
assert isinstance(wanted_object, wanted_type), message
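

# A minimal, hedged usage sketch (not in the original file). Each helper is a
# thin wrapper around a plain assert, so the calls below should all pass:
if __name__ == "__main__":
    assert_not_none("value")                            # truthy, passes
    assert_equal(2 + 2, 4)
    assert_in_list([1, 2, 3], wanted_element=2)
    assert_not_in_list([1, 2, 3], unwanted_element=9)
    assert_of_type(int, 42)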
| [] |
ctring/Detock | tools/proto/transaction_pb2.py | a1171a511d9cd1f79cc3a8d54ec17f759d088de4 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/transaction.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='proto/transaction.proto',
package='slog',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\x17proto/transaction.proto\x12\x04slog\"1\n\x0eMasterMetadata\x12\x0e\n\x06master\x18\x01 \x01(\r\x12\x0f\n\x07\x63ounter\x18\x02 \x01(\r\"\x81\x01\n\nValueEntry\x12\r\n\x05value\x18\x01 \x01(\x0c\x12\x11\n\tnew_value\x18\x02 \x01(\x0c\x12\x1b\n\x04type\x18\x03 \x01(\x0e\x32\r.slog.KeyType\x12(\n\x08metadata\x18\x04 \x01(\x0b\x32\x14.slog.MasterMetadataH\x00\x42\n\n\x08optional\"C\n\rKeyValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12%\n\x0bvalue_entry\x18\x02 \x01(\x0b\x32\x10.slog.ValueEntry\"j\n\x14TransactionEventInfo\x12%\n\x05\x65vent\x18\x01 \x01(\x0e\x32\x16.slog.TransactionEvent\x12\x0c\n\x04time\x18\x02 \x01(\x03\x12\x0f\n\x07machine\x18\x03 \x01(\x05\x12\x0c\n\x04home\x18\x04 \x01(\x05\"\x8c\x03\n\x13TransactionInternal\x12\n\n\x02id\x18\x01 \x01(\x04\x12#\n\x04type\x18\x02 \x01(\x0e\x32\x15.slog.TransactionType\x12\x0c\n\x04home\x18\x03 \x01(\x05\x12\x1b\n\x13\x63oordinating_server\x18\x04 \x01(\r\x12\x11\n\ttimestamp\x18\x05 \x01(\x03\x12\x1b\n\x13involved_partitions\x18\x06 \x03(\r\x12\x19\n\x11\x61\x63tive_partitions\x18\x07 \x03(\r\x12\x18\n\x10involved_regions\x18\x08 \x03(\r\x12*\n\x06\x65vents\x18\t \x03(\x0b\x32\x1a.slog.TransactionEventInfo\x12\'\n\x1fmh_depart_from_coordinator_time\x18\n \x01(\x03\x12\x1e\n\x16mh_arrive_at_home_time\x18\x0b \x01(\x03\x12!\n\x19mh_enter_local_batch_time\x18\x0c \x01(\x03\x12\x1c\n\x14global_log_positions\x18\r \x03(\x03\"H\n\x11RemasterProcedure\x12\x12\n\nnew_master\x18\x01 \x01(\r\x12\x1f\n\x17is_new_master_lock_only\x18\x02 \x01(\x08\"\x19\n\tProcedure\x12\x0c\n\x04\x61rgs\x18\x01 \x03(\x0c\"1\n\nProcedures\x12#\n\nprocedures\x18\x01 \x03(\x0b\x32\x0f.slog.Procedure\"\xb1\x02\n\x0bTransaction\x12+\n\x08internal\x18\x01 \x01(\x0b\x32\x19.slog.TransactionInternal\x12 \n\x04\x63ode\x18\x02 \x01(\x0b\x32\x10.slog.ProceduresH\x00\x12+\n\x08remaster\x18\x03 \x01(\x0b\x32\x17.slog.RemasterProcedureH\x00\x12!\n\x04keys\x18\x04 \x03(\x0b\x32\x13.slog.KeyValueEntry\x12\x14\n\x0c\x64\x65leted_keys\x18\x05 \x03(\x0c\x12\'\n\x06status\x18\x06 \x01(\x0e\x32\x17.slog.TransactionStatus\x12#\n\nabort_code\x18\x07 \x01(\x0e\x32\x0f.slog.AbortCode\x12\x14\n\x0c\x61\x62ort_reason\x18\x08 \x01(\tB\t\n\x07program*L\n\x0fTransactionType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0f\n\x0bSINGLE_HOME\x10\x01\x12\x1b\n\x17MULTI_HOME_OR_LOCK_ONLY\x10\x02*@\n\x11TransactionStatus\x12\x0f\n\x0bNOT_STARTED\x10\x00\x12\r\n\tCOMMITTED\x10\x01\x12\x0b\n\x07\x41\x42ORTED\x10\x02*7\n\tAbortCode\x12\t\n\x05OTHER\x10\x00\x12\x10\n\x0cRATE_LIMITED\x10\x01\x12\r\n\tRESTARTED\x10\x02*\x1e\n\x07KeyType\x12\x08\n\x04READ\x10\x00\x12\t\n\x05WRITE\x10\x01*\xde\x06\n\x10TransactionEvent\x12\x07\n\x03\x41LL\x10\x00\x12\x10\n\x0c\x45NTER_SERVER\x10\x01\x12\x1c\n\x18\x45XIT_SERVER_TO_FORWARDER\x10\x02\x12\x13\n\x0f\x45NTER_FORWARDER\x10\x03\x12\x1f\n\x1b\x45XIT_FORWARDER_TO_SEQUENCER\x10\x04\x12(\n$EXIT_FORWARDER_TO_MULTI_HOME_ORDERER\x10\x05\x12\x1c\n\x18\x45NTER_MULTI_HOME_ORDERER\x10\x06\x12%\n!ENTER_MULTI_HOME_ORDERER_IN_BATCH\x10\x07\x12$\n EXIT_MULTI_HOME_ORDERER_IN_BATCH\x10\x08\x12\x1b\n\x17\x45XIT_MULTI_HOME_ORDERER\x10\t\x12\x13\n\x0f\x45NTER_SEQUENCER\x10\n\x12.\n*EXPECTED_WAIT_TIME_UNTIL_ENTER_LOCAL_BATCH\x10\x0b\x12\x15\n\x11\x45NTER_LOCAL_BATCH\x10\x0c\x12\x1c\n\x18\x45NTER_SEQUENCER_IN_BATCH\x10\r\x12\x1b\n\x17\x45XIT_SEQUENCER_IN_BATCH\x10\x0e\x12\x1e\n\x1a\x45NTER_LOG_MANAGER_IN_BATCH\x10\x0f\x12\x1b\n\x17\x45NTER_LOG_MANAGER_ORDER\x10\x10\x12\x14\n\x10\x45XIT_LOG_MANAGER\x10\x11\x12\x13\n\x0f\x45NTER_SCHEDULER\x10\x12\x12\x16\n\x12\x45NTER_SCHEDULER_LO\x10\x13\x12\x16\n\x12\x45NTER_LOCK_MANAGER\x10\x14\x12\x15\n\x11\x44\x45\x41\x44LOCK_DETECTED\x10\x15\x12\x0e\n\nDISPATCHED\x10\x16\x12\x13\n\x0f\x44ISPATCHED_FAST\x10\x17\x12\x13\n\x0f\x44ISPATCHED_SLOW\x10\x18\x12\x1e\n\x1a\x44ISPATCHED_SLOW_DEADLOCKED\x10\x19\x12\x10\n\x0c\x45NTER_WORKER\x10\x1a\x12\x14\n\x10GOT_REMOTE_READS\x10\x1b\x12\x1f\n\x1bGOT_REMOTE_READS_DEADLOCKED\x10\x1c\x12\x0f\n\x0b\x45XIT_WORKER\x10\x1d\x12\x14\n\x10RETURN_TO_SERVER\x10\x1e\x12\x19\n\x15\x45XIT_SERVER_TO_CLIENT\x10\x1f\x62\x06proto3'
)
_TRANSACTIONTYPE = _descriptor.EnumDescriptor(
name='TransactionType',
full_name='slog.TransactionType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SINGLE_HOME', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MULTI_HOME_OR_LOCK_ONLY', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1252,
serialized_end=1328,
)
_sym_db.RegisterEnumDescriptor(_TRANSACTIONTYPE)
TransactionType = enum_type_wrapper.EnumTypeWrapper(_TRANSACTIONTYPE)
_TRANSACTIONSTATUS = _descriptor.EnumDescriptor(
name='TransactionStatus',
full_name='slog.TransactionStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NOT_STARTED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='COMMITTED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ABORTED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1330,
serialized_end=1394,
)
_sym_db.RegisterEnumDescriptor(_TRANSACTIONSTATUS)
TransactionStatus = enum_type_wrapper.EnumTypeWrapper(_TRANSACTIONSTATUS)
_ABORTCODE = _descriptor.EnumDescriptor(
name='AbortCode',
full_name='slog.AbortCode',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='OTHER', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RATE_LIMITED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RESTARTED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1396,
serialized_end=1451,
)
_sym_db.RegisterEnumDescriptor(_ABORTCODE)
AbortCode = enum_type_wrapper.EnumTypeWrapper(_ABORTCODE)
_KEYTYPE = _descriptor.EnumDescriptor(
name='KeyType',
full_name='slog.KeyType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='READ', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WRITE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1453,
serialized_end=1483,
)
_sym_db.RegisterEnumDescriptor(_KEYTYPE)
KeyType = enum_type_wrapper.EnumTypeWrapper(_KEYTYPE)
_TRANSACTIONEVENT = _descriptor.EnumDescriptor(
name='TransactionEvent',
full_name='slog.TransactionEvent',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ALL', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_SERVER', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXIT_SERVER_TO_FORWARDER', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_FORWARDER', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXIT_FORWARDER_TO_SEQUENCER', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXIT_FORWARDER_TO_MULTI_HOME_ORDERER', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_MULTI_HOME_ORDERER', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_MULTI_HOME_ORDERER_IN_BATCH', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXIT_MULTI_HOME_ORDERER_IN_BATCH', index=8, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXIT_MULTI_HOME_ORDERER', index=9, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_SEQUENCER', index=10, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXPECTED_WAIT_TIME_UNTIL_ENTER_LOCAL_BATCH', index=11, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_LOCAL_BATCH', index=12, number=12,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_SEQUENCER_IN_BATCH', index=13, number=13,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXIT_SEQUENCER_IN_BATCH', index=14, number=14,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_LOG_MANAGER_IN_BATCH', index=15, number=15,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_LOG_MANAGER_ORDER', index=16, number=16,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXIT_LOG_MANAGER', index=17, number=17,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_SCHEDULER', index=18, number=18,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_SCHEDULER_LO', index=19, number=19,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_LOCK_MANAGER', index=20, number=20,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DEADLOCK_DETECTED', index=21, number=21,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DISPATCHED', index=22, number=22,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DISPATCHED_FAST', index=23, number=23,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DISPATCHED_SLOW', index=24, number=24,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DISPATCHED_SLOW_DEADLOCKED', index=25, number=25,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENTER_WORKER', index=26, number=26,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='GOT_REMOTE_READS', index=27, number=27,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='GOT_REMOTE_READS_DEADLOCKED', index=28, number=28,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXIT_WORKER', index=29, number=29,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RETURN_TO_SERVER', index=30, number=30,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EXIT_SERVER_TO_CLIENT', index=31, number=31,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1486,
serialized_end=2348,
)
_sym_db.RegisterEnumDescriptor(_TRANSACTIONEVENT)
TransactionEvent = enum_type_wrapper.EnumTypeWrapper(_TRANSACTIONEVENT)
UNKNOWN = 0
SINGLE_HOME = 1
MULTI_HOME_OR_LOCK_ONLY = 2
NOT_STARTED = 0
COMMITTED = 1
ABORTED = 2
OTHER = 0
RATE_LIMITED = 1
RESTARTED = 2
READ = 0
WRITE = 1
ALL = 0
ENTER_SERVER = 1
EXIT_SERVER_TO_FORWARDER = 2
ENTER_FORWARDER = 3
EXIT_FORWARDER_TO_SEQUENCER = 4
EXIT_FORWARDER_TO_MULTI_HOME_ORDERER = 5
ENTER_MULTI_HOME_ORDERER = 6
ENTER_MULTI_HOME_ORDERER_IN_BATCH = 7
EXIT_MULTI_HOME_ORDERER_IN_BATCH = 8
EXIT_MULTI_HOME_ORDERER = 9
ENTER_SEQUENCER = 10
EXPECTED_WAIT_TIME_UNTIL_ENTER_LOCAL_BATCH = 11
ENTER_LOCAL_BATCH = 12
ENTER_SEQUENCER_IN_BATCH = 13
EXIT_SEQUENCER_IN_BATCH = 14
ENTER_LOG_MANAGER_IN_BATCH = 15
ENTER_LOG_MANAGER_ORDER = 16
EXIT_LOG_MANAGER = 17
ENTER_SCHEDULER = 18
ENTER_SCHEDULER_LO = 19
ENTER_LOCK_MANAGER = 20
DEADLOCK_DETECTED = 21
DISPATCHED = 22
DISPATCHED_FAST = 23
DISPATCHED_SLOW = 24
DISPATCHED_SLOW_DEADLOCKED = 25
ENTER_WORKER = 26
GOT_REMOTE_READS = 27
GOT_REMOTE_READS_DEADLOCKED = 28
EXIT_WORKER = 29
RETURN_TO_SERVER = 30
EXIT_SERVER_TO_CLIENT = 31
_MASTERMETADATA = _descriptor.Descriptor(
name='MasterMetadata',
full_name='slog.MasterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='master', full_name='slog.MasterMetadata.master', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='counter', full_name='slog.MasterMetadata.counter', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=33,
serialized_end=82,
)
_VALUEENTRY = _descriptor.Descriptor(
name='ValueEntry',
full_name='slog.ValueEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='slog.ValueEntry.value', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='new_value', full_name='slog.ValueEntry.new_value', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='slog.ValueEntry.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='slog.ValueEntry.metadata', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='optional', full_name='slog.ValueEntry.optional',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=85,
serialized_end=214,
)
_KEYVALUEENTRY = _descriptor.Descriptor(
name='KeyValueEntry',
full_name='slog.KeyValueEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='slog.KeyValueEntry.key', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value_entry', full_name='slog.KeyValueEntry.value_entry', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=216,
serialized_end=283,
)
_TRANSACTIONEVENTINFO = _descriptor.Descriptor(
name='TransactionEventInfo',
full_name='slog.TransactionEventInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='event', full_name='slog.TransactionEventInfo.event', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='slog.TransactionEventInfo.time', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='machine', full_name='slog.TransactionEventInfo.machine', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='home', full_name='slog.TransactionEventInfo.home', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=285,
serialized_end=391,
)
_TRANSACTIONINTERNAL = _descriptor.Descriptor(
name='TransactionInternal',
full_name='slog.TransactionInternal',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='slog.TransactionInternal.id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='slog.TransactionInternal.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='home', full_name='slog.TransactionInternal.home', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='coordinating_server', full_name='slog.TransactionInternal.coordinating_server', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='slog.TransactionInternal.timestamp', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='involved_partitions', full_name='slog.TransactionInternal.involved_partitions', index=5,
number=6, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='active_partitions', full_name='slog.TransactionInternal.active_partitions', index=6,
number=7, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='involved_regions', full_name='slog.TransactionInternal.involved_regions', index=7,
number=8, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='events', full_name='slog.TransactionInternal.events', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mh_depart_from_coordinator_time', full_name='slog.TransactionInternal.mh_depart_from_coordinator_time', index=9,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mh_arrive_at_home_time', full_name='slog.TransactionInternal.mh_arrive_at_home_time', index=10,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mh_enter_local_batch_time', full_name='slog.TransactionInternal.mh_enter_local_batch_time', index=11,
number=12, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='global_log_positions', full_name='slog.TransactionInternal.global_log_positions', index=12,
number=13, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=394,
serialized_end=790,
)
_REMASTERPROCEDURE = _descriptor.Descriptor(
name='RemasterProcedure',
full_name='slog.RemasterProcedure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='new_master', full_name='slog.RemasterProcedure.new_master', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_new_master_lock_only', full_name='slog.RemasterProcedure.is_new_master_lock_only', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=792,
serialized_end=864,
)
_PROCEDURE = _descriptor.Descriptor(
name='Procedure',
full_name='slog.Procedure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='args', full_name='slog.Procedure.args', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=866,
serialized_end=891,
)
_PROCEDURES = _descriptor.Descriptor(
name='Procedures',
full_name='slog.Procedures',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='procedures', full_name='slog.Procedures.procedures', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=893,
serialized_end=942,
)
_TRANSACTION = _descriptor.Descriptor(
name='Transaction',
full_name='slog.Transaction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='internal', full_name='slog.Transaction.internal', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='code', full_name='slog.Transaction.code', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='remaster', full_name='slog.Transaction.remaster', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='keys', full_name='slog.Transaction.keys', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='deleted_keys', full_name='slog.Transaction.deleted_keys', index=4,
number=5, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='slog.Transaction.status', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='abort_code', full_name='slog.Transaction.abort_code', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='abort_reason', full_name='slog.Transaction.abort_reason', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='program', full_name='slog.Transaction.program',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=945,
serialized_end=1250,
)
_VALUEENTRY.fields_by_name['type'].enum_type = _KEYTYPE
_VALUEENTRY.fields_by_name['metadata'].message_type = _MASTERMETADATA
_VALUEENTRY.oneofs_by_name['optional'].fields.append(
_VALUEENTRY.fields_by_name['metadata'])
_VALUEENTRY.fields_by_name['metadata'].containing_oneof = _VALUEENTRY.oneofs_by_name['optional']
_KEYVALUEENTRY.fields_by_name['value_entry'].message_type = _VALUEENTRY
_TRANSACTIONEVENTINFO.fields_by_name['event'].enum_type = _TRANSACTIONEVENT
_TRANSACTIONINTERNAL.fields_by_name['type'].enum_type = _TRANSACTIONTYPE
_TRANSACTIONINTERNAL.fields_by_name['events'].message_type = _TRANSACTIONEVENTINFO
_PROCEDURES.fields_by_name['procedures'].message_type = _PROCEDURE
_TRANSACTION.fields_by_name['internal'].message_type = _TRANSACTIONINTERNAL
_TRANSACTION.fields_by_name['code'].message_type = _PROCEDURES
_TRANSACTION.fields_by_name['remaster'].message_type = _REMASTERPROCEDURE
_TRANSACTION.fields_by_name['keys'].message_type = _KEYVALUEENTRY
_TRANSACTION.fields_by_name['status'].enum_type = _TRANSACTIONSTATUS
_TRANSACTION.fields_by_name['abort_code'].enum_type = _ABORTCODE
_TRANSACTION.oneofs_by_name['program'].fields.append(
_TRANSACTION.fields_by_name['code'])
_TRANSACTION.fields_by_name['code'].containing_oneof = _TRANSACTION.oneofs_by_name['program']
_TRANSACTION.oneofs_by_name['program'].fields.append(
_TRANSACTION.fields_by_name['remaster'])
_TRANSACTION.fields_by_name['remaster'].containing_oneof = _TRANSACTION.oneofs_by_name['program']
DESCRIPTOR.message_types_by_name['MasterMetadata'] = _MASTERMETADATA
DESCRIPTOR.message_types_by_name['ValueEntry'] = _VALUEENTRY
DESCRIPTOR.message_types_by_name['KeyValueEntry'] = _KEYVALUEENTRY
DESCRIPTOR.message_types_by_name['TransactionEventInfo'] = _TRANSACTIONEVENTINFO
DESCRIPTOR.message_types_by_name['TransactionInternal'] = _TRANSACTIONINTERNAL
DESCRIPTOR.message_types_by_name['RemasterProcedure'] = _REMASTERPROCEDURE
DESCRIPTOR.message_types_by_name['Procedure'] = _PROCEDURE
DESCRIPTOR.message_types_by_name['Procedures'] = _PROCEDURES
DESCRIPTOR.message_types_by_name['Transaction'] = _TRANSACTION
DESCRIPTOR.enum_types_by_name['TransactionType'] = _TRANSACTIONTYPE
DESCRIPTOR.enum_types_by_name['TransactionStatus'] = _TRANSACTIONSTATUS
DESCRIPTOR.enum_types_by_name['AbortCode'] = _ABORTCODE
DESCRIPTOR.enum_types_by_name['KeyType'] = _KEYTYPE
DESCRIPTOR.enum_types_by_name['TransactionEvent'] = _TRANSACTIONEVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MasterMetadata = _reflection.GeneratedProtocolMessageType('MasterMetadata', (_message.Message,), {
'DESCRIPTOR' : _MASTERMETADATA,
'__module__' : 'proto.transaction_pb2'
# @@protoc_insertion_point(class_scope:slog.MasterMetadata)
})
_sym_db.RegisterMessage(MasterMetadata)
ValueEntry = _reflection.GeneratedProtocolMessageType('ValueEntry', (_message.Message,), {
'DESCRIPTOR' : _VALUEENTRY,
'__module__' : 'proto.transaction_pb2'
# @@protoc_insertion_point(class_scope:slog.ValueEntry)
})
_sym_db.RegisterMessage(ValueEntry)
KeyValueEntry = _reflection.GeneratedProtocolMessageType('KeyValueEntry', (_message.Message,), {
'DESCRIPTOR' : _KEYVALUEENTRY,
'__module__' : 'proto.transaction_pb2'
# @@protoc_insertion_point(class_scope:slog.KeyValueEntry)
})
_sym_db.RegisterMessage(KeyValueEntry)
TransactionEventInfo = _reflection.GeneratedProtocolMessageType('TransactionEventInfo', (_message.Message,), {
'DESCRIPTOR' : _TRANSACTIONEVENTINFO,
'__module__' : 'proto.transaction_pb2'
# @@protoc_insertion_point(class_scope:slog.TransactionEventInfo)
})
_sym_db.RegisterMessage(TransactionEventInfo)
TransactionInternal = _reflection.GeneratedProtocolMessageType('TransactionInternal', (_message.Message,), {
'DESCRIPTOR' : _TRANSACTIONINTERNAL,
'__module__' : 'proto.transaction_pb2'
# @@protoc_insertion_point(class_scope:slog.TransactionInternal)
})
_sym_db.RegisterMessage(TransactionInternal)
RemasterProcedure = _reflection.GeneratedProtocolMessageType('RemasterProcedure', (_message.Message,), {
'DESCRIPTOR' : _REMASTERPROCEDURE,
'__module__' : 'proto.transaction_pb2'
# @@protoc_insertion_point(class_scope:slog.RemasterProcedure)
})
_sym_db.RegisterMessage(RemasterProcedure)
Procedure = _reflection.GeneratedProtocolMessageType('Procedure', (_message.Message,), {
'DESCRIPTOR' : _PROCEDURE,
'__module__' : 'proto.transaction_pb2'
# @@protoc_insertion_point(class_scope:slog.Procedure)
})
_sym_db.RegisterMessage(Procedure)
Procedures = _reflection.GeneratedProtocolMessageType('Procedures', (_message.Message,), {
'DESCRIPTOR' : _PROCEDURES,
'__module__' : 'proto.transaction_pb2'
# @@protoc_insertion_point(class_scope:slog.Procedures)
})
_sym_db.RegisterMessage(Procedures)
Transaction = _reflection.GeneratedProtocolMessageType('Transaction', (_message.Message,), {
'DESCRIPTOR' : _TRANSACTION,
'__module__' : 'proto.transaction_pb2'
# @@protoc_insertion_point(class_scope:slog.Transaction)
})
_sym_db.RegisterMessage(Transaction)
# @@protoc_insertion_point(module_scope)
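

# A hedged usage sketch (not part of the generated file): building,
# serializing, and re-parsing a Transaction with the message classes
# registered above.
if __name__ == "__main__":
    txn = Transaction()
    txn.internal.id = 42
    txn.internal.type = SINGLE_HOME
    entry = txn.keys.add()                  # repeated KeyValueEntry field
    entry.key = b"account-1"
    entry.value_entry.type = WRITE
    entry.value_entry.new_value = b"100"
    txn.status = COMMITTED
    data = txn.SerializeToString()
    assert Transaction.FromString(data).internal.id == 42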
| [((12, 10, 12, 36), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ({}, {}), '()', True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((17, 13, 24, 1), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((56, 18, 56, 69), 'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper', 'enum_type_wrapper.EnumTypeWrapper', ({(56, 52, 56, 68): '_TRANSACTIONTYPE'}, {}), '(_TRANSACTIONTYPE)', False, 'from google.protobuf.internal import enum_type_wrapper\n'), ((87, 20, 87, 73), 'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper', 'enum_type_wrapper.EnumTypeWrapper', ({(87, 54, 87, 72): '_TRANSACTIONSTATUS'}, {}), '(_TRANSACTIONSTATUS)', False, 'from google.protobuf.internal import enum_type_wrapper\n'), ((118, 12, 118, 57), 'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper', 'enum_type_wrapper.EnumTypeWrapper', ({(118, 46, 118, 56): '_ABORTCODE'}, {}), '(_ABORTCODE)', False, 'from google.protobuf.internal import enum_type_wrapper\n'), ((144, 10, 144, 53), 'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper', 'enum_type_wrapper.EnumTypeWrapper', ({(144, 44, 144, 52): '_KEYTYPE'}, {}), '(_KEYTYPE)', False, 'from google.protobuf.internal import enum_type_wrapper\n'), ((320, 19, 320, 71), 'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper', 'enum_type_wrapper.EnumTypeWrapper', ({(320, 53, 320, 70): '_TRANSACTIONEVENT'}, {}), '(_TRANSACTIONEVENT)', False, 'from google.protobuf.internal import enum_type_wrapper\n'), ((898, 17, 902, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(898, 58, 898, 74): '"""MasterMetadata"""', (898, 76, 898, 95): '(_message.Message,)', (898, 97, 902, 3): "{'DESCRIPTOR': _MASTERMETADATA, '__module__': 'proto.transaction_pb2'}"}, {}), "('MasterMetadata', (_message.\n Message,), {'DESCRIPTOR': _MASTERMETADATA, '__module__':\n 'proto.transaction_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((905, 13, 909, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(905, 54, 905, 66): '"""ValueEntry"""', (905, 68, 905, 87): '(_message.Message,)', (905, 89, 909, 3): "{'DESCRIPTOR': _VALUEENTRY, '__module__': 'proto.transaction_pb2'}"}, {}), "('ValueEntry', (_message.Message,),\n {'DESCRIPTOR': _VALUEENTRY, '__module__': 'proto.transaction_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((912, 16, 916, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(912, 57, 912, 72): '"""KeyValueEntry"""', (912, 74, 912, 93): '(_message.Message,)', (912, 95, 916, 3): "{'DESCRIPTOR': _KEYVALUEENTRY, '__module__': 'proto.transaction_pb2'}"}, {}), "('KeyValueEntry', (_message.Message\n ,), {'DESCRIPTOR': _KEYVALUEENTRY, '__module__': 'proto.transaction_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((919, 23, 923, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(919, 64, 919, 86): '"""TransactionEventInfo"""', (919, 88, 919, 107): '(_message.Message,)', (919, 109, 923, 3): "{'DESCRIPTOR': _TRANSACTIONEVENTINFO, '__module__': 'proto.transaction_pb2'}"}, {}), "('TransactionEventInfo', (_message.\n Message,), {'DESCRIPTOR': _TRANSACTIONEVENTINFO, '__module__':\n 'proto.transaction_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((926, 22, 930, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(926, 63, 926, 84): '"""TransactionInternal"""', (926, 86, 926, 105): '(_message.Message,)', (926, 107, 930, 3): "{'DESCRIPTOR': _TRANSACTIONINTERNAL, '__module__': 'proto.transaction_pb2'}"}, {}), "('TransactionInternal', (_message.\n Message,), {'DESCRIPTOR': _TRANSACTIONINTERNAL, '__module__':\n 'proto.transaction_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((933, 20, 937, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(933, 61, 933, 80): '"""RemasterProcedure"""', (933, 82, 933, 101): '(_message.Message,)', (933, 103, 937, 3): "{'DESCRIPTOR': _REMASTERPROCEDURE, '__module__': 'proto.transaction_pb2'}"}, {}), "('RemasterProcedure', (_message.\n Message,), {'DESCRIPTOR': _REMASTERPROCEDURE, '__module__':\n 'proto.transaction_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((940, 12, 944, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(940, 53, 940, 64): '"""Procedure"""', (940, 66, 940, 85): '(_message.Message,)', (940, 87, 944, 3): "{'DESCRIPTOR': _PROCEDURE, '__module__': 'proto.transaction_pb2'}"}, {}), "('Procedure', (_message.Message,),\n {'DESCRIPTOR': _PROCEDURE, '__module__': 'proto.transaction_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((947, 13, 951, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(947, 54, 947, 66): '"""Procedures"""', (947, 68, 947, 87): '(_message.Message,)', (947, 89, 951, 3): "{'DESCRIPTOR': _PROCEDURES, '__module__': 'proto.transaction_pb2'}"}, {}), "('Procedures', (_message.Message,),\n {'DESCRIPTOR': _PROCEDURES, '__module__': 'proto.transaction_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((954, 14, 958, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(954, 55, 954, 68): '"""Transaction"""', (954, 70, 954, 89): '(_message.Message,)', (954, 91, 958, 3): "{'DESCRIPTOR': _TRANSACTION, '__module__': 'proto.transaction_pb2'}"}, {}), "('Transaction', (_message.Message,),\n {'DESCRIPTOR': _TRANSACTION, '__module__': 'proto.transaction_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((33, 4, 37, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((38, 4, 42, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((43, 4, 47, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((64, 4, 68, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((69, 4, 73, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((74, 4, 78, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((95, 4, 99, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((100, 4, 104, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((105, 4, 109, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((126, 4, 130, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((131, 4, 135, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((152, 4, 156, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((157, 4, 161, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((162, 4, 166, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((167, 4, 171, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((172, 4, 176, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((177, 4, 181, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((182, 4, 186, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((187, 4, 191, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((192, 4, 196, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((197, 4, 201, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((202, 4, 206, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((207, 4, 211, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((212, 4, 216, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((217, 4, 221, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((222, 4, 226, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from 
google.protobuf import descriptor as _descriptor\n'), ((227, 4, 231, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((232, 4, 236, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((237, 4, 241, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((242, 4, 246, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((247, 4, 251, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((252, 4, 256, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((257, 4, 261, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((262, 4, 266, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((267, 4, 271, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((272, 4, 276, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((277, 4, 281, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((282, 4, 286, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((287, 4, 291, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((292, 4, 296, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((297, 4, 301, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((302, 4, 306, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((307, 4, 311, 50), 'google.protobuf.descriptor.EnumValueDescriptor', '_descriptor.EnumValueDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((375, 4, 381, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((382, 4, 388, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((414, 4, 420, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 
'from google.protobuf import descriptor as _descriptor\n'), ((421, 4, 427, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((428, 4, 434, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((435, 4, 441, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((453, 4, 457, 14), 'google.protobuf.descriptor.OneofDescriptor', '_descriptor.OneofDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((472, 4, 478, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((479, 4, 485, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((511, 4, 517, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((518, 4, 524, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((525, 4, 531, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((532, 4, 538, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((564, 4, 570, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((571, 4, 577, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((578, 4, 584, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((585, 4, 591, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((592, 4, 598, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((599, 4, 605, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((606, 4, 612, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((613, 4, 619, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((620, 4, 626, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((627, 4, 633, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((634, 4, 640, 93), 'google.protobuf.descriptor.FieldDescriptor', 
'_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((641, 4, 647, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((648, 4, 654, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((680, 4, 686, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((687, 4, 693, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((719, 4, 725, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((751, 4, 757, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((783, 4, 789, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((790, 4, 796, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((797, 4, 803, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((804, 4, 810, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((811, 4, 817, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((818, 4, 824, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((825, 4, 831, 93), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((850, 4, 854, 14), 'google.protobuf.descriptor.OneofDescriptor', '_descriptor.OneofDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n')] |
google/simple-reinforcement-learning | srl/simulation_test.py | 9bdac29427cd5c556d7ea7531b807645f043aae3 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
from srl import movement
from srl import simulation
from srl import world
class TestSimulation(unittest.TestCase):
def test_in_terminal_state(self):
w = world.World.parse('@^')
sim = simulation.Simulation(world.Static(w))
self.assertFalse(sim.in_terminal_state)
sim.act(movement.ACTION_RIGHT)
self.assertTrue(sim.in_terminal_state)
def test_act_accumulates_score(self):
w = world.World.parse('@.')
sim = simulation.Simulation(world.Static(w))
sim.act(movement.ACTION_RIGHT)
sim.act(movement.ACTION_LEFT)
self.assertEqual(-2, sim.score)
def test_to_array(self):
w = world.World.parse('$.@^#')
sim = simulation.Simulation(world.Static(w))
self.assertTrue(
(np.array([[2, 3, 4, 5, 1]], dtype=np.int8) == sim.to_array())
.all())
| [((25, 8, 25, 31), 'srl.world.World.parse', 'world.World.parse', ({(25, 26, 25, 30): '"""@^"""'}, {}), "('@^')", False, 'from srl import world\n'), ((32, 8, 32, 31), 'srl.world.World.parse', 'world.World.parse', ({(32, 26, 32, 30): '"""@."""'}, {}), "('@.')", False, 'from srl import world\n'), ((39, 8, 39, 34), 'srl.world.World.parse', 'world.World.parse', ({(39, 26, 39, 33): '"""$.@^#"""'}, {}), "('$.@^#')", False, 'from srl import world\n'), ((26, 32, 26, 47), 'srl.world.Static', 'world.Static', ({(26, 45, 26, 46): 'w'}, {}), '(w)', False, 'from srl import world\n'), ((33, 32, 33, 47), 'srl.world.Static', 'world.Static', ({(33, 45, 33, 46): 'w'}, {}), '(w)', False, 'from srl import world\n'), ((40, 32, 40, 47), 'srl.world.Static', 'world.Static', ({(40, 45, 40, 46): 'w'}, {}), '(w)', False, 'from srl import world\n'), ((42, 7, 42, 49), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n')] |
Tejvinder/thesis-ghidra | src/xmltollvm.py | 2e59bc48d6bb820ecf6b390e5cf5893fc6ea0216 | from llvmlite import ir
import xml.etree.ElementTree as et
int32 = ir.IntType(32)
int64 = ir.IntType(64)
int1 = ir.IntType(1)
void_type = ir.VoidType()
function_names = []
registers, functions, uniques, extracts = {}, {}, {}, {}
internal_functions = {}
memory = {}
flags = ["ZF", "CF", "OF", "SF"]
pointers = ["RSP", "RIP", "RBP", "EBP", "ESP"]
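# Flags (ZF/CF/OF/SF) are modeled below as 1-bit globals and the stack/frame/
# instruction pointers as i8* globals; every other register becomes an integer
# global whose width comes from the size attribute in the Ghidra export.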
def lift(filename):
root = et.parse(filename).getroot()
module = ir.Module(name="lifted")
for register in root.find('globals').findall('register'):
if register.get('name') in flags:
var = ir.GlobalVariable(module, ir.IntType(1), register.get('name'))
var.initializer = ir.Constant(ir.IntType(1), None)
var.linkage = 'internal'
registers[register.get('name')] = var
elif register.get('name') in pointers:
var = ir.GlobalVariable(module, ir.PointerType(ir.IntType(8)), register.get('name'))
var.initializer = ir.Constant(ir.PointerType(ir.IntType(8)), None)
var.linkage = 'internal'
registers[register.get('name')] = var
else:
var = ir.GlobalVariable(module, ir.IntType(8 * int(register.get('size'))), register.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(register.get('size'))), None)
var.linkage = 'internal'
registers[register.get('name')] = var
for memory_location in root.find('memory').findall('memory'):
var = ir.GlobalVariable(module, ir.IntType(8 * int(memory_location.get('size'))), memory_location.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(memory_location.get('size'))), None)
var.linkage = 'internal'
memory[memory_location.get('name')] = var
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "intra_function_branch")
internal_functions["intra_function_branch"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "call_indirect")
internal_functions["call_indirect"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "bit_extraction")
internal_functions["bit_extraction"] = ir_func
for function in root.findall('function'):
name = function.get('name')
x = 1
while name in function_names:
name = name + "_" + str(x)
x += 1
function_names.append(name)
address = function.get('address')
functions[address] = [build_function(name, module), function]
for address in functions:
ir_func, function = functions[address]
populate_func(ir_func, function)
return module
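# Minimal usage sketch (hypothetical file name; the returned llvmlite module can
# be printed as textual IR or handed to llvmlite.binding for compilation):
#   module = lift("exported_pcode.xml")
#   print(str(module))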
def populate_func(ir_func, function):
builders, blocks = build_cfg(function, ir_func)
if blocks == {}:
return
populate_cfg(function, builders, blocks)
def build_function(name, module):
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, name)
return ir_func
def build_cfg(function, ir_func):
builders, blocks = {}, {}
instructions = function.find("instructions")
if instructions:
block = ir_func.append_basic_block("entry")
blocks["entry"] = block
builders["entry"] = ir.IRBuilder(block)
for instruction in instructions:
address = instruction.find("address").text
block = ir_func.append_basic_block(address)
blocks[address] = block
builders[address] = ir.IRBuilder(block)
return builders, blocks
# noinspection DuplicatedCode
def populate_cfg(function, builders, blocks):
builder = builders["entry"]
stack_size = 10 * 1024 * 1024
stack = builder.alloca(ir.IntType(8), stack_size, name="stack")
stack_top = builder.gep(stack, [ir.Constant(int64, stack_size - 8)], name="stack_top")
builder.store(stack_top, registers["RSP"])
builder.branch(list(blocks.values())[1])
block_iterator = 1
instr = 0
quiter = False
for instruction in function.find("instructions"):
if quiter:
break
address = instruction.find("address").text
if address in builders:
builder = builders[address]
pcodes = instruction.find("pcodes")
pc = 0
no_branch = True
for pcode in pcodes:
pc += 1
mnemonic = pcode.find("name")
if mnemonic.text == "COPY":
output = pcode.find("output")
if output.text in flags and pcode.find("input_0").get("storage") == "constant":
source = ir.Constant(ir.IntType(1), int(pcode.find("input_0").text, 0))
else:
source = fetch_input_varnode(builder, pcode.find("input_0"))
update_output(builder, pcode.find("output"), source)
elif mnemonic.text == "LOAD":
input_1 = pcode.find("input_1")
output = pcode.find("output")
rhs = fetch_input_varnode(builder, input_1)
if input_1.get("storage") == "unique" and output.get("storage") == "unique":
                    # This is incorrect: it treats the operation as a copy instead of loading from the address in input_1
update_output(builder, output, rhs)
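                    # A more faithful lowering (untested sketch, size assumed from the
                    # output varnode) would dereference the unique as an address:
                    #   ptr_ty = ir.IntType(8 * int(output.get("size"))).as_pointer()
                    #   update_output(builder, output,
                    #                 builder.load(builder.inttoptr(rhs, ptr_ty)))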
else:
if input_1.text in pointers:
rhs = builder.gep(rhs, [ir.Constant(int64, 0)])
result = builder.load(rhs)
update_output(builder, output, result)
elif mnemonic.text == "STORE":
input_1 = pcode.find("input_1") # target
input_2 = pcode.find("input_2") # source
rhs = fetch_input_varnode(builder, input_2)
lhs = fetch_output_varnode(input_1)
lhs2 = builder.gep(lhs, [ir.Constant(int64, 0)])
if lhs2.type != rhs.type.as_pointer():
lhs2 = builder.bitcast(lhs2, rhs.type.as_pointer())
builder.store(rhs, lhs2)
elif mnemonic.text == "BRANCH":
value = pcode.find("input_0").text[2:-2]
if value in functions:
target = functions[value][0]
builder.call(target, [])
elif value in blocks:
target = blocks[value]
builder.branch(target)
no_branch = False
else:
# weird jump into some label in another function
# might be solved with callbr instruction?
builder.call(internal_functions["intra_function_branch"], [])
elif mnemonic.text == "CBRANCH":
true_target = blocks[pcode.find("input_0").text[2:-2]]
false_target = list(blocks.values())[block_iterator + 1]
condition = fetch_input_varnode(builder, pcode.find("input_1"))
no_branch = False
builder.cbranch(condition, true_target, false_target)
elif mnemonic.text == "BRANCHIND":
no_branch = False
target = fetch_input_varnode(builder, pcode.find("input_0"))
if not target.type.is_pointer:
target = builder.inttoptr(target, target.type.as_pointer())
builder.branch_indirect(target)
elif mnemonic.text == "CALL":
target = functions[pcode.find("input_0").text[2:-2]][0]
builder.call(target, [])
elif mnemonic.text == "CALLIND":
# target = pcode.find("input_0").text[2:-2]
builder.call(internal_functions["call_indirect"], [])
elif mnemonic.text == "USERDEFINED":
raise Exception("Not implemented")
elif mnemonic.text == "RETURN":
input_1 = pcode.find("input_1")
no_branch = False
if input_1 is None:
builder.ret_void()
else:
raise Exception("Return value being passed")
elif mnemonic.text == "PIECE":
raise Exception("PIECE operation needs to be tested")
elif mnemonic.text == "SUBPIECE":
output = pcode.find("output")
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
if input_1.text == "0x0":
val = fetch_input_varnode(builder, input_0)
result = builder.trunc(val, ir.IntType(int(output.get("size")) * 8))
update_output(builder, output, result)
else:
builder.call(internal_functions['bit_extraction'], [])
elif mnemonic.text == "INT_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('==', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_NOTEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('!=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESSEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_ZEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.zext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.sext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_ADD":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.add(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SUB":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, -int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.sub(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_CARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.uadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SCARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.sadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SBORROW":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
                    result = builder.ssub_with_overflow(lhs, rhs)  # SBORROW is signed *subtraction* overflow
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_2COMP":
val = fetch_input_varnode(builder, pcode.find("input_0"))
                    result = builder.neg(val)  # INT_2COMP is arithmetic negation (-val)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_NEGATE":
val = fetch_input_varnode(builder, pcode.find("input_0"))
                    result = builder.not_(val)  # INT_NEGATE is bitwise complement (~val)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_LEFT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.shl(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_RIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.lshr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SRIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.ashr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_MULT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.mul(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_DIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
                    output = builder.udiv(lhs, rhs)  # IRBuilder has no div(); INT_DIV is unsigned
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_REM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.urem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SDIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.sdiv(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SREM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.srem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "BOOL_NEGATE":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
                    result = builder.not_(lhs)  # flip the i1; neg() is a no-op on booleans
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "FLOAT_EQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NOTEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESSEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ADD":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SUB":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_MULT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_DIV":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NEG":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ABS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SQRT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_CEIL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_FLOOR":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ROUND":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NAN":
raise Exception("Not implemented")
elif mnemonic.text == "INT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "TRUNC":
raise Exception("Not implemented")
elif mnemonic.text == "CPOOLREF":
raise Exception("Not implemented")
elif mnemonic.text == "NEW":
raise Exception("Not implemented")
elif mnemonic.text == "MULTIEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "INDIRECT":
raise Exception("Not implemented")
elif mnemonic.text == "PTRADD":
raise Exception("Not implemented")
elif mnemonic.text == "PTRSUB":
raise Exception("Not implemented")
elif mnemonic.text == "CAST":
raise Exception("Not implemented")
else:
raise Exception("Not a standard pcode instruction")
block_iterator += 1
instr += 1
if block_iterator < len(blocks) and no_branch:
builder.branch(list(blocks.values())[block_iterator])
def fetch_input_varnode(builder, name):
var_type = name.get("storage")
var_size = int(name.get("size")) * 8
if var_type == "register":
return builder.load(registers[name.text])
elif var_type == "unique":
if name.text not in list(uniques.keys()):
raise Exception("Temporary variable referenced before defined")
return uniques[name.text]
elif var_type == "constant":
var = ir.Constant(ir.IntType(var_size), int(name.text, 0))
return var
elif var_type == "memory":
return memory[name.text]
def update_output(builder, name, output):
var_type = name.get("storage")
if var_type == "register":
reg = registers[name.text]
if reg.type != output.type.as_pointer():
reg = builder.bitcast(reg, output.type.as_pointer())
builder.store(output, reg)
elif var_type == "unique":
uniques[name.text] = output
def fetch_output_varnode(name):
var_type = name.get("storage")
if var_type == "register":
return registers[name.text]
elif var_type == "unique":
if name.text not in uniques:
uniques[name.text] = None
return uniques[name.text]
def int_check_inputs(builder, lhs, rhs, target):
if lhs.type != target:
if lhs.type.is_pointer:
lhs2 = lhs
lhs = builder.ptrtoint(lhs, target)
if lhs2 == rhs:
rhs = lhs
if rhs.type != target and lhs != rhs:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
return lhs, rhs
def check_shift_inputs(builder, lhs, rhs, target):
if lhs.type != target:
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, target)
else:
lhs = builder.zext(lhs, target)
if rhs.type != target:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
else:
rhs = builder.zext(rhs, target)
return lhs, rhs
def int_comparison_check_inputs(builder, lhs, rhs):
# For integer comparison operations. We assume rhs is the correct type.
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, rhs.type)
return lhs, rhs | [((4, 8, 4, 22), 'llvmlite.ir.IntType', 'ir.IntType', ({(4, 19, 4, 21): '32'}, {}), '(32)', False, 'from llvmlite import ir\n'), ((5, 8, 5, 22), 'llvmlite.ir.IntType', 'ir.IntType', ({(5, 19, 5, 21): '64'}, {}), '(64)', False, 'from llvmlite import ir\n'), ((6, 7, 6, 20), 'llvmlite.ir.IntType', 'ir.IntType', ({(6, 18, 6, 19): '1'}, {}), '(1)', False, 'from llvmlite import ir\n'), ((7, 12, 7, 25), 'llvmlite.ir.VoidType', 'ir.VoidType', ({}, {}), '()', False, 'from llvmlite import ir\n'), ((18, 13, 18, 37), 'llvmlite.ir.Module', 'ir.Module', (), '', False, 'from llvmlite import ir\n'), ((43, 18, 43, 31), 'llvmlite.ir.VoidType', 'ir.VoidType', ({}, {}), '()', False, 'from llvmlite import ir\n'), ((44, 11, 44, 43), 'llvmlite.ir.FunctionType', 'ir.FunctionType', ({(44, 27, 44, 38): 'func_return', (44, 40, 44, 42): '[]'}, {}), '(func_return, [])', False, 'from llvmlite import ir\n'), ((45, 14, 45, 64), 'llvmlite.ir.Function', 'ir.Function', ({(45, 26, 45, 32): 'module', (45, 34, 45, 38): 'fnty', (45, 40, 45, 63): '"""intra_function_branch"""'}, {}), "(module, fnty, 'intra_function_branch')", False, 'from llvmlite import ir\n'), ((48, 18, 48, 31), 'llvmlite.ir.VoidType', 'ir.VoidType', ({}, {}), '()', False, 'from llvmlite import ir\n'), ((49, 11, 49, 43), 'llvmlite.ir.FunctionType', 'ir.FunctionType', ({(49, 27, 49, 38): 'func_return', (49, 40, 49, 42): '[]'}, {}), '(func_return, [])', False, 'from llvmlite import ir\n'), ((50, 14, 50, 56), 'llvmlite.ir.Function', 'ir.Function', ({(50, 26, 50, 32): 'module', (50, 34, 50, 38): 'fnty', (50, 40, 50, 55): '"""call_indirect"""'}, {}), "(module, fnty, 'call_indirect')", False, 'from llvmlite import ir\n'), ((53, 18, 53, 31), 'llvmlite.ir.VoidType', 'ir.VoidType', ({}, {}), '()', False, 'from llvmlite import ir\n'), ((54, 11, 54, 43), 'llvmlite.ir.FunctionType', 'ir.FunctionType', ({(54, 27, 54, 38): 'func_return', (54, 40, 54, 42): '[]'}, {}), '(func_return, [])', False, 'from llvmlite import ir\n'), ((55, 14, 55, 57), 'llvmlite.ir.Function', 'ir.Function', ({(55, 26, 55, 32): 'module', (55, 34, 55, 38): 'fnty', (55, 40, 55, 56): '"""bit_extraction"""'}, {}), "(module, fnty, 'bit_extraction')", False, 'from llvmlite import ir\n'), ((83, 18, 83, 31), 'llvmlite.ir.VoidType', 'ir.VoidType', ({}, {}), '()', False, 'from llvmlite import ir\n'), ((84, 11, 84, 43), 'llvmlite.ir.FunctionType', 'ir.FunctionType', ({(84, 27, 84, 38): 'func_return', (84, 40, 84, 42): '[]'}, {}), '(func_return, [])', False, 'from llvmlite import ir\n'), ((85, 14, 85, 45), 'llvmlite.ir.Function', 'ir.Function', ({(85, 26, 85, 32): 'module', (85, 34, 85, 38): 'fnty', (85, 40, 85, 44): 'name'}, {}), '(module, fnty, name)', False, 'from llvmlite import ir\n'), ((95, 28, 95, 47), 'llvmlite.ir.IRBuilder', 'ir.IRBuilder', ({(95, 41, 95, 46): 'block'}, {}), '(block)', False, 'from llvmlite import ir\n'), ((108, 27, 108, 40), 'llvmlite.ir.IntType', 'ir.IntType', ({(108, 38, 108, 39): '8'}, {}), '(8)', False, 'from llvmlite import ir\n'), ((17, 11, 17, 29), 'xml.etree.ElementTree.parse', 'et.parse', ({(17, 20, 17, 28): 'filename'}, {}), '(filename)', True, 'import xml.etree.ElementTree as et\n'), ((100, 32, 100, 51), 'llvmlite.ir.IRBuilder', 'ir.IRBuilder', ({(100, 45, 100, 50): 'block'}, {}), '(block)', False, 'from llvmlite import ir\n'), ((109, 36, 109, 70), 'llvmlite.ir.Constant', 'ir.Constant', ({(109, 48, 109, 53): 'int64', (109, 55, 109, 69): 'stack_size - 8'}, {}), '(int64, stack_size - 8)', False, 'from llvmlite import ir\n'), ((22, 44, 22, 57), 
'llvmlite.ir.IntType', 'ir.IntType', ({(22, 55, 22, 56): '1'}, {}), '(1)', False, 'from llvmlite import ir\n'), ((23, 42, 23, 55), 'llvmlite.ir.IntType', 'ir.IntType', ({(23, 53, 23, 54): '1'}, {}), '(1)', False, 'from llvmlite import ir\n'), ((472, 26, 472, 46), 'llvmlite.ir.IntType', 'ir.IntType', ({(472, 37, 472, 45): 'var_size'}, {}), '(var_size)', False, 'from llvmlite import ir\n'), ((27, 59, 27, 72), 'llvmlite.ir.IntType', 'ir.IntType', ({(27, 70, 27, 71): '8'}, {}), '(8)', False, 'from llvmlite import ir\n'), ((28, 57, 28, 70), 'llvmlite.ir.IntType', 'ir.IntType', ({(28, 68, 28, 69): '8'}, {}), '(8)', False, 'from llvmlite import ir\n'), ((130, 41, 130, 54), 'llvmlite.ir.IntType', 'ir.IntType', ({(130, 52, 130, 53): '1'}, {}), '(1)', False, 'from llvmlite import ir\n'), ((151, 41, 151, 62), 'llvmlite.ir.Constant', 'ir.Constant', ({(151, 53, 151, 58): 'int64', (151, 60, 151, 61): '0'}, {}), '(int64, 0)', False, 'from llvmlite import ir\n'), ((143, 48, 143, 69), 'llvmlite.ir.Constant', 'ir.Constant', ({(143, 60, 143, 65): 'int64', (143, 67, 143, 68): '0'}, {}), '(int64, 0)', False, 'from llvmlite import ir\n')] |
Farz7/Darkness | modules/WPSeku/modules/discovery/generic/wplisting.py | 4f3eb5fee3d8a476d001ad319ca22bca274eeac9 | #!/usr/bin/env python
# -*- Coding: UTF-8 -*-
#
# WPSeku: Wordpress Security Scanner
#
# @url: https://github.com/m4ll0k/WPSeku
# @author: Momo Outaadi (M4ll0k)
import re
from lib import wphttp
from lib import wpprint
class wplisting:
chk = wphttp.UCheck()
out = wpprint.wpprint()
def __init__(self,agent,proxy,redir,time,url,cookie):
self.url = url
self.cookie = cookie
self.req = wphttp.wphttp(
agent=agent,proxy=proxy,
redir=redir,time=time
)
def run(self):
paths = ['/wp-admin','/wp-includes','/wp-content/uploads',
'/wp-content/plugins','/wp-content/themes'
]
try:
for path in paths:
url = wplisting.chk.path(self.url,path)
resp = self.req.send(url,c=self.cookie)
if resp.status_code == 200 and resp._content != None:
if resp.url == url:
wplisting.out.plus('Dir {} listing enabled under: {}'.format(path,resp.url))
		except Exception as e:
pass | [] |
toscawidgets/tw2.jit | tw2/jit/widgets/__init__.py | c5e8059975115385f225029ba5c7380673524122 |
from tw2.jit.widgets.chart import (AreaChart, BarChart, PieChart)
from tw2.jit.widgets.graph import (ForceDirectedGraph, RadialGraph)
from tw2.jit.widgets.tree import (SpaceTree, HyperTree, Sunburst,
Icicle, TreeMap)
from tw2.jit.widgets.ajax import AjaxRadialGraph
from tw2.jit.widgets.sqla import SQLARadialGraph
| [] |
tiianprb/TikTok-Downloader-Bot | bot.py | 91b6fd64d5a151c3e439772c69850a18b7562ceb | import json, requests, os, shlex, asyncio, uuid, shutil
from typing import Tuple
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery
# Configs
API_HASH = os.environ['API_HASH']
APP_ID = int(os.environ['APP_ID'])
BOT_TOKEN = os.environ['BOT_TOKEN']
downloads = './downloads/{}/'
#Button
START_BUTTONS=[
[
InlineKeyboardButton('Source', url='https://github.com/X-Gorn/TikTokDL'),
InlineKeyboardButton('Project Channel', url='https://t.me/xTeamBots'),
],
[InlineKeyboardButton('Author', url='https://t.me/xgorn')],
]
DL_BUTTONS=[
[
InlineKeyboardButton('No Watermark', callback_data='nowm'),
InlineKeyboardButton('Watermark', callback_data='wm'),
],
[InlineKeyboardButton('Audio', callback_data='audio')],
]
# Running bot
xbot = Client('TikTokDL', api_id=APP_ID, api_hash=API_HASH, bot_token=BOT_TOKEN)
# Helpers
# Thanks to FridayUB
async def run_cmd(cmd: str) -> Tuple[str, str, int, int]:
args = shlex.split(cmd)
process = await asyncio.create_subprocess_exec(
*args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
return (
stdout.decode("utf-8", "replace").strip(),
stderr.decode("utf-8", "replace").strip(),
process.returncode,
process.pid,
)
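# e.g.: stdout, stderr, returncode, pid = await run_cmd('ffmpeg -version')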
# Start
@xbot.on_message(filters.command('start') & filters.private)
async def _start(bot, update):
await update.reply_text(f"I'm TikTokDL!\nYou can download tiktok video/audio using this bot", True, reply_markup=InlineKeyboardMarkup(START_BUTTONS))
# Downloader for tiktok
@xbot.on_message(filters.regex(pattern='.*http.*') & filters.private)
async def _tiktok(bot, update):
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
if not 'tiktok.com' in resp.url:
return
await update.reply('Select the options below', True, reply_markup=InlineKeyboardMarkup(DL_BUTTONS))
# Callbacks
@xbot.on_callback_query()
async def _callbacks(bot, cb: CallbackQuery):
if cb.data == 'nowm':
dirs = downloads.format(uuid.uuid4().hex)
os.makedirs(dirs)
cbb = cb
update = cbb.message.reply_to_message
await cb.message.delete()
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
if '?' in resp.url:
tt = resp.url.split('?', 1)[0]
else:
tt = resp.url
ttid = dirs+tt.split('/')[-1]
r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
result = r.text
rs = json.loads(result)
link = rs['result']['nowm']
resp = session.head(link, allow_redirects=True)
r = requests.get(resp.url, allow_redirects=True)
open(f'{ttid}.mp4', 'wb').write(r.content)
await bot.send_video(update.chat.id, f'{ttid}.mp4',)
shutil.rmtree(dirs)
elif cb.data == 'wm':
dirs = downloads.format(uuid.uuid4().hex)
os.makedirs(dirs)
cbb = cb
update = cbb.message.reply_to_message
await cb.message.delete()
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
if '?' in resp.url:
tt = resp.url.split('?', 1)[0]
else:
tt = resp.url
ttid = dirs+tt.split('/')[-1]
r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
result = r.text
rs = json.loads(result)
link = rs['result']['wm']
resp = session.head(link, allow_redirects=True)
r = requests.get(resp.url, allow_redirects=True)
open(f'{ttid}.mp4', 'wb').write(r.content)
await bot.send_video(update.chat.id, f'{ttid}.mp4',)
shutil.rmtree(dirs)
elif cb.data == 'audio':
dirs = downloads.format(uuid.uuid4().hex)
os.makedirs(dirs)
cbb = cb
update = cbb.message.reply_to_message
await cb.message.delete()
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
if '?' in resp.url:
tt = resp.url.split('?', 1)[0]
else:
tt = resp.url
ttid = dirs+tt.split('/')[-1]
r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
result = r.text
rs = json.loads(result)
link = rs['result']['wm']
resp = session.head(link, allow_redirects=True)
r = requests.get(resp.url, allow_redirects=True)
open(f'{ttid}.mp4', 'wb').write(r.content)
    cmd = f'ffmpeg -i "{ttid}.mp4" -vn -ar 44100 -ac 2 -ab 192k -f mp3 "{ttid}.mp3"'
await run_cmd(cmd)
await bot.send_audio(update.chat.id, f'{ttid}.mp3',)
shutil.rmtree(dirs)
xbot.run()
| [((31, 7, 31, 80), 'pyrogram.Client', 'Client', (), '', False, 'from pyrogram import Client, filters\n'), ((37, 9, 37, 25), 'shlex.split', 'shlex.split', ({(37, 21, 37, 24): 'cmd'}, {}), '(cmd)', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((58, 12, 58, 30), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((15, 8, 15, 80), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (), '', False, 'from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery\n'), ((16, 8, 16, 77), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (), '', False, 'from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery\n'), ((18, 5, 18, 61), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (), '', False, 'from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery\n'), ((23, 8, 23, 66), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (), '', False, 'from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery\n'), ((24, 8, 24, 61), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (), '', False, 'from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery\n'), ((26, 5, 26, 57), 'pyrogram.types.InlineKeyboardButton', 'InlineKeyboardButton', (), '', False, 'from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery\n'), ((38, 18, 40, 3), 'asyncio.create_subprocess_exec', 'asyncio.create_subprocess_exec', (), '', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((50, 17, 50, 41), 'pyrogram.filters.command', 'filters.command', ({(50, 33, 50, 40): '"""start"""'}, {}), "('start')", False, 'from pyrogram import Client, filters\n'), ((55, 17, 55, 50), 'pyrogram.filters.regex', 'filters.regex', (), '', False, 'from pyrogram import Client, filters\n'), ((69, 4, 69, 21), 'os.makedirs', 'os.makedirs', ({(69, 16, 69, 20): 'dirs'}, {}), '(dirs)', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((74, 14, 74, 32), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((81, 8, 81, 69), 'requests.get', 'requests.get', ({(81, 21, 81, 68): "'https://api.reiyuura.me/api/dl/tiktok?url=' + tt"}, {}), "('https://api.reiyuura.me/api/dl/tiktok?url=' + tt)", False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((83, 9, 83, 27), 'json.loads', 'json.loads', ({(83, 20, 83, 26): 'result'}, {}), '(result)', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((86, 8, 86, 52), 'requests.get', 'requests.get', (), '', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((89, 4, 89, 23), 'shutil.rmtree', 'shutil.rmtree', ({(89, 18, 89, 22): 'dirs'}, {}), '(dirs)', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((92, 4, 92, 21), 'os.makedirs', 'os.makedirs', ({(92, 16, 92, 20): 'dirs'}, {}), '(dirs)', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((97, 14, 97, 32), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((104, 8, 104, 69), 'requests.get', 'requests.get', ({(104, 21, 104, 68): "'https://api.reiyuura.me/api/dl/tiktok?url=' + tt"}, {}), "('https://api.reiyuura.me/api/dl/tiktok?url=' + tt)", False, 'import json, requests, os, shlex, asyncio, uuid, 
shutil\n'), ((106, 9, 106, 27), 'json.loads', 'json.loads', ({(106, 20, 106, 26): 'result'}, {}), '(result)', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((109, 8, 109, 52), 'requests.get', 'requests.get', (), '', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((112, 4, 112, 23), 'shutil.rmtree', 'shutil.rmtree', ({(112, 18, 112, 22): 'dirs'}, {}), '(dirs)', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((52, 115, 52, 150), 'pyrogram.types.InlineKeyboardMarkup', 'InlineKeyboardMarkup', ({(52, 136, 52, 149): 'START_BUTTONS'}, {}), '(START_BUTTONS)', False, 'from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery\n'), ((62, 68, 62, 100), 'pyrogram.types.InlineKeyboardMarkup', 'InlineKeyboardMarkup', ({(62, 89, 62, 99): 'DL_BUTTONS'}, {}), '(DL_BUTTONS)', False, 'from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery\n'), ((68, 28, 68, 40), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((115, 4, 115, 21), 'os.makedirs', 'os.makedirs', ({(115, 16, 115, 20): 'dirs'}, {}), '(dirs)', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((120, 14, 120, 32), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((127, 8, 127, 69), 'requests.get', 'requests.get', ({(127, 21, 127, 68): "'https://api.reiyuura.me/api/dl/tiktok?url=' + tt"}, {}), "('https://api.reiyuura.me/api/dl/tiktok?url=' + tt)", False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((129, 9, 129, 27), 'json.loads', 'json.loads', ({(129, 20, 129, 26): 'result'}, {}), '(result)', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((132, 8, 132, 52), 'requests.get', 'requests.get', (), '', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((137, 4, 137, 23), 'shutil.rmtree', 'shutil.rmtree', ({(137, 18, 137, 22): 'dirs'}, {}), '(dirs)', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((91, 28, 91, 40), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n'), ((114, 28, 114, 40), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import json, requests, os, shlex, asyncio, uuid, shutil\n')] |
skyu0221/660-iot | frontend-gui/rpanel.py | d31f973c93871bfa8122f1b83364d0147d402e9e | import wx
import wx.adv
import random
import util
import config
import time
import datetime
import threading
import requests
import json
from functools import partial
class RequesterThread(threading.Thread):
# https://www.oreilly.com/library/view/python-cookbook/0596001673/ch06s03.html
def __init__(self, name, parent_thread, parent_panel):
threading.Thread.__init__(self, name=name)
self._stopevent = threading.Event()
self.parent_panel = parent_panel
self.parent_thread = parent_thread
def run(self):
while (not self._stopevent.is_set()) and self.parent_thread.is_alive():
print("hello")
# print(self.parent_panel.info_widget_dict)
# print(self.parent_panel.info)
            # change to real time
end = datetime.datetime.now()
start = end - datetime.timedelta(minutes=1)
self.parent_panel.info["start"] = util.convert_to_GMT_zone(start)
self.parent_panel.info["end"] = util.convert_to_GMT_zone(end)
self.parent_panel._send_request(self.parent_panel.info)
self._stopevent.wait(5.0)
def join(self, timeout=None):
self._stopevent.set()
print("thread stop")
threading.Thread.join(self, timeout)
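
# Usage sketch (mirrors ResultPanel.OnClick below): pass the GUI thread as the
# parent so polling stops when it dies, and call join() to cancel early:
#   worker = RequesterThread("poller", threading.current_thread(), panel)
#   worker.start()
#   ...
#   worker.join()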
class RightPanel(wx.Panel):
def __init__(self, parent, info={}):
wx.Panel.__init__(self, parent=parent)
self.drop_down_menu_ID = None
self.result_visual_ID = None
self.info = info
self._init_UI()
def _init_UI(self):
self.SetBackgroundColour("#BAB86C")
font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
font.SetPointSize(20)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
# add question label
st1 = wx.StaticText(self, label='Question')
st1.SetFont(font)
hbox1.Add(st1, proportion=2, flag=wx.RIGHT, border=10)
# add drop down menu
question_list = [
"1. How many people are in the building?",
"2. How many people are in a specific room?",
"3. Where is someone?",
# "4. Which room has someone visited?",
"4. What is the utilization of a specific room?"
]
drop_down_menu = wx.ComboBox(self, choices=question_list)
hbox1.Add(drop_down_menu, proportion=8, flag=wx.TOP, border=5)
vbox1 = wx.BoxSizer(wx.VERTICAL)
# add result label
# st2 = wx.StaticText(self, label='Result')
# st2.SetFont(font)
# vbox1.Add(st2, proportion=1, flag=wx.ALIGN_CENTER, border=1)
# add canvas panel
# canvas_panel = CanvasPanel(self)
# vbox1.Add(canvas_panel, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
result_panel = ResultPanel(self)
# result_panel.SetBackgroundColour("#000000")
vbox1.Add(result_panel, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
vbox.Add(hbox1, proportion=1, flag=wx.EXPAND|wx.ALL, border=10)
vbox.Add(vbox1, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
self.SetSizer(vbox)
# listen combo
drop_down_menu.Bind(wx.EVT_COMBOBOX, partial(self.on_selection,
combo_box=drop_down_menu,
panel=result_panel))
def on_selection(self, event, combo_box, panel):
# print(self.drop_down_menu.GetValue())
print(combo_box.GetValue())
panel.init_question_UI(combo_box.GetValue()[0])
# st2 = wx.StaticText(self, label=combo_box.GetValue())
# st2.SetFont(font)
# sizer1.Add(st2, proportion=1, flag=wx.ALIGN_CENTER, border=1)
class ResultPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# self._init_UI()
self._q_dict = {"1": self._q1_panel,
"2": self._q2_panel,
"3": self._q3_panel,
# "4": self._q4_panel,
"4": self._q5_panel,}
self.info_widget_dict = {"feeder": {}, "consumer": {}}
self.worker = None
self.server = config.SERVER
self._set_font()
def _set_font(self):
self.font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
self.font.SetPointSize(12)
self.font.MakeBold()
def init_question_UI(self, q_idx):
# clean the panel
for child in self.GetChildren():
child.Destroy()
# stop the worker
if self.worker:
# print("the worker has been stop")
self.worker.join()
self.worker = None
self.info_widget_dict["feeder"].clear()
self.info_widget_dict["consumer"].clear()
decorate_panel = self._q_dict[q_idx]
decorate_panel()
def add_date_time_picker_layout(self):
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
# Start
start_label = wx.StaticText(self, label="START TIME")
start_label.SetFont(self.font)
dpc1 = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc1 = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
hbox1.Add(start_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox1.Add(dpc1, proportion=3, flag=wx.RIGHT, border=5)
hbox1.Add(tpc1, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# End
end_label = wx.StaticText(self, label="END TIME")
end_label.SetFont(self.font)
dpc2 = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc2 = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
hbox2.Add(end_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox2.Add(dpc2, proportion=3, flag=wx.RIGHT, border=5)
hbox2.Add(tpc2, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(hbox2, proportion=0, flag=wx.ALL, border=5)
# Real time box
real_label = wx.StaticText(self, label="REAL TIME")
real_label.SetFont(self.font)
cb = wx.CheckBox(self)
hbox3.Add(real_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox3.Add(cb, proportion=3, flag=wx.RIGHT|wx.TOP, border=5)
vbox.Add(hbox3, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["feeder"]["start_date"] = dpc1
self.info_widget_dict["feeder"]["start_time"] = tpc1
self.info_widget_dict["feeder"]["end_date"] = dpc2
self.info_widget_dict["feeder"]["end_time"] = tpc2
self.info_widget_dict["feeder"]["real_time"] = cb
# self.SetBackgroundColour("#000000")
# r = lambda: random.randint(0,255)
# color = '#%02X%02X%02X' % (r(),r(),r())
return vbox
def _add_confirm_button(self, sizer, question_index):
"""
        question_index => {1, 2, 3, 4, 5}
"""
        confirm_btn = wx.Button(self, id=-1, label="Confirm")
        sizer.Add(confirm_btn, proportion=0, flag=wx.TOP|wx.LEFT, border=5)
        # self.Bind(wx.EVT_BUTTON, self.OnClick, confirm_btn)
        self.Bind(wx.EVT_BUTTON, lambda event: self.OnClick(event, question_index), confirm_btn)
def _add_result_label(self, sizer):
result_label = wx.StaticText(self, label="RESULT")
font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
font.SetPointSize(20)
font.MakeBold()
result_label.SetFont(font)
sizer.Add(result_label, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL, border=20)
def OnClick(self, event, question_index):
info = {}
# handle date and time
if question_index in [1, 2, 3, 4]:
start_date = self.info_widget_dict["feeder"]["start_date"].GetValue()
start_time = self.info_widget_dict["feeder"]["start_time"].GetValue()
end_date = self.info_widget_dict["feeder"]["end_date"].GetValue()
end_time = self.info_widget_dict["feeder"]["end_time"].GetValue()
info["start"] = util.combine_datetime(start_date, start_time)
info["end"] = util.combine_datetime(end_date, end_time)
# print("start time = {}".format(info["start"]))
# print("end time = {}".format(info["end"]))
if_real_time = self.info_widget_dict["feeder"]["real_time"].GetValue()
if question_index == 1:
# requester send request to server
pass
elif question_index == 2:
# requester send request to server
room = self.info_widget_dict["feeder"]["room_select"].GetValue()
print(room)
info["room"] = room
elif question_index == 3:
# requester send request to server
name = self.info_widget_dict["feeder"]["name_select"].GetValue()
print(name)
info["name"] = name
else: # question_index = 4
name = self.info_widget_dict["feeder"]["name_select"].GetValue()
print(name)
info["name"] = name
else: # question_index == 5
if_real_time = False
date = self.info_widget_dict["feeder"]["date_picker"].GetValue()
time = self.info_widget_dict["feeder"]["time_picker"].GetValue()
room = self.info_widget_dict["feeder"]["room_select"].GetValue()
info["date"] = util.combine_datetime(date, time)
info["room"] = room
# requester send request to server
info["question_index"] = question_index
self.info = info
if if_real_time:
if not self.worker:
self.worker = ReqeusterThread(name="question_{}_requester".format(question_index), parent_thread=threading.currentThread(), parent_panel=self)
self.worker.start()
print("start worker")
else:
# first check if the worker is working
if self.worker:
self.worker.join()
self.worker = None
self._send_request(info)
def _request_handle(self, url, body={}, params={}, METHOD="post"):
# https://stackoverflow.com/questions/15900338/python-request-post-with-param-data
print("url", url)
print("body", body)
print("params", params)
resp = {}
if METHOD == "post":
r = requests.post(url, data=body)
else:
r = requests.get(url, params=params)
print(r.status_code)
if r.status_code == 200:
resp = r.json()
print(resp)
print(type(resp))
return resp
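    # Illustrative call (hypothetical room and time values) showing how the
    # question handlers below use _request_handle:
    #   resp = self._request_handle(url=self.server + "/people_room/",
    #                               body={"room": "Room_1_1_140",
    #                                     "start": "2020-04-05 21:00:00",
    #                                     "end": "2020-04-05 21:10:00"},
    #                               METHOD="post")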
def _send_request(self, info):
question_index = int(info["question_index"])
if question_index == 1:
## get ##
url = self.server + "/people_building/"
body = {"start": info["start"], "end": info["end"]}
# body = {'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
occu = str(response['count'])
except:
occu = str(0)
## received##
self.info_widget_dict["consumer"]["occu_label"].SetLabel(occu)
elif question_index == 2:
## get ##
url = self.server + "/people_room/"
body = {"room": info["room"],
"start": info["start"],
"end": info["end"],
# 'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
occu = str(response['count'])
occupancy_info = response['occupancy_info']
except:
occu = str(0)
occupancy_info = []
## received ##
self.info_widget_dict["consumer"]["occu_label"].SetLabel(occu)
nlb = self.info_widget_dict["consumer"]["name_list"]
nlb.Clear()
for name in occupancy_info:
nlb.Append(name)
elif question_index == 3:
## get ##
url = self.server + "/person_room/"
body = {"name": info["name"],
"start": info["start"],
"end": info["end"],
# 'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
room_list = response['room']
count = str(len(room_list))
except:
count = str(0)
room_list = []
## received ##
self.info_widget_dict["consumer"]["count_label"].SetLabel(count)
rlb = self.info_widget_dict["consumer"]["room_list"]
rlb.Clear()
for name in room_list:
rlb.Append(name)
elif question_index == 4:
## get ##
            url = self.server + "/question/4"
body = {"name": info["name"],
# "start_time": info["start"],
# "end_time": info["end"],
"time": info["start"],
}
response = self._request_handle(url=url, body=body, METHOD="post")
            # Placeholder: the response above is ignored and dummy data is displayed.
            count = str(random.randint(0, 20))
room_list = ["Room_1_1_140", "Room_1_1_141"]
## received ##
self.info_widget_dict["consumer"]["count_label"].SetLabel(count)
rlb = self.info_widget_dict["consumer"]["room_list"]
rlb.Clear()
for name in room_list:
rlb.Append(name)
elif question_index == 5:
## get ##
url = self.server + "/utilization/"
body = {"room": info["room"],
"date": info["date"],
# 'date': '2020-04-05 20:00:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
# self.request_handle(url, body, METHOD="post")
try:
response = json.loads(response)
utilization = "{:.2f}".format(response["utilization"]*100) + "%"
except:
utilization = "0%"
## received##
self.info_widget_dict["consumer"]["utilization_label"].SetLabel(utilization)
def _q1_panel(self):
print("q1")
main_vbox = self.add_date_time_picker_layout()
# confirm button
self._add_confirm_button(main_vbox, 1)
# add result label
self._add_result_label(main_vbox)
# add result widget
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Occupancy")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["occu_label"] = occu_label
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q2_panel(self):
print("q2")
main_vbox = self.add_date_time_picker_layout()
# Room Info
room_hbox = wx.BoxSizer(wx.HORIZONTAL)
room_label = wx.StaticText(self, label="Room")
room_label.SetFont(self.font)
room_hbox.Add(room_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
room_list = [
"",
"Room_1_1_140",
"Room_1_1_141",
"Room_1_1_142",
"Room_1_1_143",
"Room_1_1_144",
"Room_1_1_150",
"Room_1_1_184"]
room_combobox = wx.ComboBox(self, choices=room_list)
room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
# room_info = wx.TextCtrl(self)
# room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
main_vbox.Add(room_hbox, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(main_vbox, 2)
# add result label
self._add_result_label(main_vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["room_select"] = room_combobox
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Occupancy")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
namelb = wx.ListBox(self)
main_vbox.Add(namelb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["occu_label"] = occu_label
self.info_widget_dict["consumer"]["name_list"] = namelb
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q3_panel(self):
print("q3")
vbox = self.add_date_time_picker_layout()
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
name_label = wx.StaticText(self, label="Name")
name_label.SetFont(self.font)
hbox1.Add(name_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
name_text_ctrl = wx.TextCtrl(self)
name_text_ctrl.AppendText('Please enter unique name')
hbox1.Add(name_text_ctrl, proportion=8, flag=wx.TOP, border=5)
vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(vbox, 3)
# add result label
self._add_result_label(vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["name_select"] = name_text_ctrl
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Room Count")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
roomlb = wx.ListBox(self)
vbox.Add(roomlb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["count_label"] = occu_label
self.info_widget_dict["consumer"]["room_list"] = roomlb
self.SetSizer(vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q4_panel(self):
print("q4")
main_vbox = self.add_date_time_picker_layout()
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
name_label = wx.StaticText(self, label="Name")
name_label.SetFont(self.font)
hbox1.Add(name_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
name_text_ctrl = wx.TextCtrl(self)
name_text_ctrl.AppendText('Please enter unique name')
hbox1.Add(name_text_ctrl, proportion=8, flag=wx.TOP, border=5)
main_vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(main_vbox, 4)
# add result label
self._add_result_label(main_vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["name_select"] = name_text_ctrl
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Room Count")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
roomlb = wx.ListBox(self)
main_vbox.Add(roomlb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["count_label"] = occu_label
self.info_widget_dict["consumer"]["room_list"] = roomlb
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q5_panel(self):
print("q5")
vbox = wx.BoxSizer(wx.VERTICAL)
# datetime
date_hbox = wx.BoxSizer(wx.HORIZONTAL)
date_label = wx.StaticText(self, label="Datetime")
date_label.SetFont(self.font)
dpc = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
date_hbox.Add(date_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
date_hbox.Add(dpc, proportion=3, flag=wx.RIGHT, border=5)
date_hbox.Add(tpc, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(date_hbox, proportion=0, flag=wx.ALL, border=5)
# Room Info
room_hbox = wx.BoxSizer(wx.HORIZONTAL)
room_label = wx.StaticText(self, label="Room")
room_label.SetFont(self.font)
room_hbox.Add(room_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
room_list = [
"",
"Room_1_1_140",
"Room_1_1_141",
"Room_1_1_142",
"Room_1_1_143",
"Room_1_1_144",
"Room_1_1_150",
"Room_1_1_184"]
room_combobox = wx.ComboBox(self, choices=room_list)
room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
vbox.Add(room_hbox, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(vbox, 5)
# add result label
self._add_result_label(vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["date_picker"] = dpc
self.info_widget_dict["feeder"]["time_picker"] = tpc
self.info_widget_dict["feeder"]["room_select"] = room_combobox
# add result widget
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Utilization")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["utilization_label"] = occu_label
self.SetSizer(vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
henchan/memfinity | webapp/search.py | 3860985e29b203f0569f60eea68ffb22aaf34b1f
"""High-level search API.
This module implements application-specific search semantics on top of
App Engine's search API. There are two chief operations: querying for
entities, and managing entities in the search facility.
Add and remove Card entities in the search facility:
insert_cards([models.Card])
delete_cards([models.Card])
Query for Card entities:
query_cards(query_string, limit=20) -> search.SearchResults
The results items will have the following fields:
user_key, user_nickname, front, back, info, tag (repeated), added,
modified, source_url
The query_string is free-form, as a user would enter it, and passes
through a custom query processor before the query is submitted to App
Engine. Notably, pass @username to restrict the query to entities
authored by username, and #tag to restrict the query to only documents
matching the given tag. Multiple @usernames or #tags result in an OR
query.
"""
import re
from google.appengine.api import search
from google.appengine.ext import ndb
QUERY_LIMIT = 20
CARD_INDEX_NAME = 'cards'
# Increase this value when _card2doc changes its format so that
# queries can determine the data available on returned documents.
CARD_DOCUMENT_VERSION = '1'
# Ensure we're under the 2000 character limit from
# https://developers.google.com/appengine/docs/python/search/query_strings
MAX_QUERY_LEN = 200
# TODO(chris): it would be better if this module didn't know about
# specific entity types, but instead defined a protocol to get
# metadata from an entity and generate a document.
def insert_cards(cards):
"""Insert or update models.Card entities in the search facility."""
# TODO(chris): should we allow more than 200 cards per call?
assert len(cards) <= 200, len(cards)
card_docs = map(_card2doc, cards)
index = search.Index(name=CARD_INDEX_NAME)
index.put(card_docs)
def delete_cards(cards):
"""Delete models.Card entities from the search facility."""
index = search.Index(name=CARD_INDEX_NAME)
card_doc_ids = map(_card2docid, cards)
index.delete(card_doc_ids)
def query_cards(query_str, limit=QUERY_LIMIT, web_safe_cursor=None,
ids_only=False, user_key=None):
"""Return the search.SearchResults for a query.
ids_only is useful because the returned document IDs are url-safe
keys for models.Card entities.
"""
if web_safe_cursor:
cursor = search.Cursor(web_safe_string=web_safe_cursor)
else:
cursor = None
index = search.Index(name=CARD_INDEX_NAME)
query_processor = _QueryProcessor(
query_str,
name_field='user_nickname',
tag_field='tag',
private_field='private',
user_key_field='user_key',
query_options=search.QueryOptions(limit=limit, cursor=cursor,
ids_only=ids_only),
user_key=user_key)
search_results = index.search(query_processor.query())
# TODO(chris): should this return partially-instantiated
# models.Card instances instead of leaking implementation details
# like we do now?
return search_results
def _card2doc(card):
# TODO(chris): should we include all fields that would be needed
# for rendering a search results item to avoid entity lookup?
tag_fields = [search.AtomField(name='tag', value=tag) for tag in card.tags]
doc = search.Document(
doc_id=_card2docid(card),
fields=[
search.AtomField(name='doc_version', value=CARD_DOCUMENT_VERSION),
search.AtomField(name='user_key', value=card.user_key.urlsafe()),
# TODO(chris): is user_nickname always a direct-match
# shortname, e.g., @chris?
search.AtomField(name='user_nickname', value=card.user_nickname),
# TODO(chris): support HtmlField for richer cards?
search.TextField(name='front', value=card.front),
search.TextField(name='back', value=card.back),
search.TextField(name='info', value=card.info),
search.DateField(name='added', value=card.added),
search.DateField(name='modified', value=card.modified),
search.AtomField(name='source_url', value=card.source_url),
search.AtomField(name='private', value="1" if card.private else "0"),
] + tag_fields)
return doc
def _card2docid(card):
# We set the search.Document's ID to the entity key it mirrors.
return card.key.urlsafe()
def _sanitize_user_input(query_str):
# The search API puts special meaning on certain inputs and we
# don't want to expose the internal query language to users so
# we strictly restrict inputs. The rules are:
#
# Allowed characters for values are [a-zA-Z0-9._-].
# @name is removed and 'name' values returned as a list.
# #tag is removed and 'tag' values returned as a list.
terms, names, tags = [], [], []
for token in query_str.split():
# TODO(chris): allow international characters.
sane_token = re.sub(r'[^a-zA-Z0-9._-]+', '', token)
if sane_token:
            if sane_token in ('AND', 'OR'):
continue # ignore special search keywords
elif token.startswith('@'):
names.append(sane_token)
elif token.startswith('#'):
tags.append(sane_token)
else:
terms.append(sane_token)
return terms, names, tags
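# Worked example (hypothetical input):
#   _sanitize_user_input('dogs @chris #python AND')
#   -> (['dogs'], ['chris'], ['python'])
# 'AND' is dropped as a special search keyword; '@' and '#' are stripped.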
class _QueryProcessor(object):
"""Simple queries, possibly with @name and #tag tokens.
name_field is the field @name tokens should apply to.
tag_field is the name of the field #tag tokens should apply to.
"""
def __init__(self, query_str,
name_field, tag_field, private_field, user_key_field,
query_options=None, user_key=None):
self.query_str = query_str
self.name_field = name_field
self.tag_field = tag_field
self.private_field = private_field
self.user_key_field = user_key_field
self.query_options = query_options
self.user_key = user_key
def _sanitize_user_input(self):
query_str = self.query_str[:MAX_QUERY_LEN]
return _sanitize_user_input(query_str)
def _build_query_string(self):
terms, names, tags = self._sanitize_user_input()
        # Our simple query logic is to OR together all terms from the
# user, then AND in the name or tag filters (plus a privacy clause).
parts = []
if terms:
parts.append(' OR '.join(terms))
if names:
parts.append('%s: (%s)' % (self.name_field, ' OR '.join(names)))
if tags:
parts.append('%s: (%s)' % (self.tag_field, ' OR '.join(tags)))
# Don't return cards that other users have marked private...
privacy = '%s: 0' % self.private_field
if self.user_key:
# ... but always show the user their own cards in results.
privacy += ' OR %s: (%s)' % (self.user_key_field, self.user_key)
parts.append('(' + privacy + ')')
return ' AND '.join(parts)
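    # Worked example (hypothetical inputs): with terms=['dogs'], names=['chris'],
    # tags=[] and no user_key set, the resulting query string is:
    #   dogs AND user_nickname: (chris) AND (private: 0)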
def query(self):
query = search.Query(
query_string=self._build_query_string(),
options=self.query_options)
return query
Baidi96/AI-Agent-for-Light-Rider | Bot/Bot/board.py | 6ae0cd4ea07248751c0f015ed74123ae3dec33d1
import copy
import sys
PLAYER1, PLAYER2, EMPTY, BLOCKED = [0, 1, 2, 3]
S_PLAYER1, S_PLAYER2, S_EMPTY, S_BLOCKED, = ['0', '1', '.', 'x']
CHARTABLE = [(PLAYER1, S_PLAYER1), (PLAYER2, S_PLAYER2), (EMPTY, S_EMPTY), (BLOCKED, S_BLOCKED)]
DIRS = [
((-1, 0), "up"),
((1, 0), "down"),
((0, 1), "right"),
((0, -1), "left")
]
#the information of the whole grid
class Board:
def __init__(self, width, height):
self.width = width
self.height = height
self.cell = [[EMPTY for col in range (0, width)] for row in range(0, height)]
def parse_cell_char(self, players, row, col, char):
result = -1
if char == S_PLAYER1:
players[0].row = row;
players[0].col = col;
elif char == S_PLAYER2:
players[1].row = row;
players[1].col = col;
for (i, symbol) in CHARTABLE:
if symbol == char:
result = i
break
return result
def parse_cell(self, players, row, col, data):
cell = []
for char in data:
item = self.parse_cell_char(players, row, col, char)
cell.append(item)
return cell
def parse(self, players, data):
cells = data.split(',')
col = 0
row = 0
for cell in cells:
if (col >= self.width):
col = 0
row +=1
self.cell[row][col] = self.parse_cell(players, row, col, cell)
col += 1
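    # Example (hypothetical engine input): on a 2x2 board, parse(players, "0,.,.,1")
    # places player 0 at (row 0, col 0), player 1 at (row 1, col 1), and leaves
    # the remaining two cells EMPTY.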
def in_bounds (self, row, col):
return row >= 0 and col >= 0 and col < self.width and row < self.height
def is_legal(self, row, col, my_id):
enemy_id = my_id ^ 1
return (self.in_bounds(row, col)) and (not BLOCKED == self.cell[row][col]) and (not enemy_id == self.cell[row][col])
    def is_legal_tuple(self, loc, my_id):
        # is_legal requires the player id; pass it through so the call doesn't fail
        row, col = loc
        return self.is_legal(row, col, my_id)
    def get_adjacent(self, row, col, my_id):
        result = []
        for (o_row, o_col), _ in DIRS:
            t_row, t_col = o_row + row, o_col + col
            if self.is_legal(t_row, t_col, my_id):
                result.append((t_row, t_col))
        return result
def legal_moves(self, my_id, players):
my_player = players[my_id]
result = []
for ((o_row, o_col), order) in DIRS:
t_row = my_player.row + o_row
t_col = my_player.col + o_col
if self.is_legal(t_row, t_col, my_id):
result.append(((o_row, o_col), order))
else:
pass
return result
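    # Example return value: a list of ((row_offset, col_offset), order) pairs
    # from DIRS, e.g. [((-1, 0), "up"), ((0, 1), "right")] when only up and
    # right are legal moves for my_id.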
def update_cell(self, row, col, data):
self.cell[row][col] = data
def output_cell(self, cell):
done = False
for (i, symbol) in CHARTABLE:
if i == cell:
if not done:
sys.stderr.write(symbol)
done = True
break
if not done:
sys.stderr.write("!")
done = True
def output(self):
for row in self.cell:
sys.stderr.write("\n")
for cell in row:
self.output_cell(cell)
sys.stderr.write("\n")
sys.stderr.flush()
def tostring(self):
res = ""
        for row in range(self.height):
            for col in range(self.width):
res += str(self.cell[row][col])
res += ","
return res
wonnerky/coteMaster | baekjoon/1012.py | 360e491e6342c1ee42ff49750b838a2ead865613
import sys
sys.setrecursionlimit(10000)
def dfs(r, c):
global visit
visit[r][c] = True
mov = [(-1, 0), (0, -1), (1, 0), (0, 1)]
for i in range(4):
dr, dc = mov[i]
nr, nc = r + dr, c + dc
if 0 <= nr < N and 0 <= nc < M and visit[nr][nc] == False and board[nr][nc] == 1:
dfs(nr, nc)
T = int(input())
for _ in range(T):
M, N, K = map(int, input().split())
board = [[0] * M for _ in range(N)]
for _ in range(K):
c, r = map(int, input().split())
board[r][c] = 1
visit = [[False] * M for _ in range(N)]
cnt = 0
for r in range(N):
for c in range(M):
if not visit[r][c] and board[r][c] == 1:
cnt += 1
dfs(r, c)
    # Debug dump of the visit matrix; kept commented out so only the count is printed.
    # for ele in visit:
    #     print(ele)
    # print()
    print(cnt)
daemonslayer/Notebook | collection/cp/algorithms-master/python/binary_tree.py | a9880be9bd86955afd6b8f7352822bc18673eda3
"""
Binary Tree and basic properties
1. In-Order Traversal
2. Pre-Order Traversal
3. Post-Order Traversal
4. Level-Order Traversal
"""
from collections import deque
class BinaryTree(object):
"""
Representation of a general binary tree
data: value of element
left: Left subtree
right: Right subtree
"""
def __init__(self, data, left=None, right=None):
if data is None:
raise ValueError('data cannot be null')
self.data = data
self.left = left
self.right = right
def insert(self, data):
        raise NotImplementedError('Method insert is not implemented')
def delete(self, data):
raise NotImplementedError('Method delete is not implemented')
def inorder_traversal(self, write=True):
"""
Return list of node data as inorder traversal. If write is True then print as well.
        This is an iterative tree inorder traversal.
Algorithm:
1. Create a stack of nodes node_stack
2. Mark root as current
3. While current is not none or node_stack is not empty
                a. While current is not empty push current to node_stack and reassign current to current->left
                b. If current is empty and node_stack is not empty then pop the top of the stack and record that node
                c. Mark current as popped_node->right
"""
traversal_lis = []
node_stack = []
current = self
while current or node_stack:
while current:
node_stack.append(current)
current = current.left
if node_stack:
node = node_stack.pop()
traversal_lis.append(node.data)
current = node.right
if write:
for item in traversal_lis:
print(item, end=' ')
return traversal_lis
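    # For the sample tree built in main() below (1 with children 2 and 3, and 2
    # with children 4 and 5), this returns [4, 2, 5, 1, 3].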
def preorder_traversal(self, write=True):
"""
Return list of node data as preorder traversal. If write is true then print as well.
Algorithm:
1. Create stack of nodes as node_stack
2. Mark root as current
3. While current is not none or node_stack is not empty
a. While current is not empty
i. Push current to node_stack
ii. Add current->data to traversal_list
iii. Reassign current to current->left
                b. If node_stack is not empty then pop the topmost node from node_stack and assign current to
                   popped_node->right
"""
traversal_lis = []
node_stack = []
current = self
while current or node_stack:
while current:
node_stack.append(current)
traversal_lis.append(current.data)
current = current.left
if node_stack:
node = node_stack.pop()
current = node.right
if write:
for item in traversal_lis:
print(item, end=' ')
return traversal_lis
def postorder_traversal(self, write=True):
"""
Return list of node data as postorder traversal. If write is true then print as well.
Algorithm:
1. Create stack of nodes as node_stack
2. Mark root as current
3. While current is not None or node_stack is not empty
a. While current is not None
i. Push current to node_stack
ii. Append current->data to traversal_list
iii. Reassign current as current->right !IMPORTANT: Here we're iterating on current-right as we're doing
postorder traversal
b. If node_stack is not empty then pop top node and assign poped_node->left to current
"""
traversal_lis = []
node_stack = []
current = self
while current or node_stack:
while current:
node_stack.append(current)
traversal_lis.append(current.data)
current = current.right
if node_stack:
node = node_stack.pop()
current = node.left
if write:
for item in traversal_lis:
print(item, end=' ')
return traversal_lis
def levelorder_traversal(self, write=True):
"""
Return list of node data as level order traversal. If write is true then print as well.
Algorithm:
1. Maintain a queue of nodes to process as node_queue
2. Push root to node_queue
3. While node_queue is not empty
a. Get top node of node_queue as top
b. Push top->data to traversal_list
c. Append top->left and top->right into node_queue if they are not null
"""
traversal_list = []
node_queue = deque()
node_queue.append(self)
while node_queue:
top = node_queue.popleft()
traversal_list.append(top.data)
if top.left:
node_queue.append(top.left)
if top.right:
node_queue.append(top.right)
if write:
for item in traversal_list:
print(item, end=' ')
return traversal_list
def main():
"""
Tree Structure:
1
/ \
2 3
/ \
4 5
"""
tree = BinaryTree(1)
tree.left = BinaryTree(2)
tree.right = BinaryTree(3)
tree.left.left = BinaryTree(4)
tree.left.right = BinaryTree(5)
assert tree.inorder_traversal(write=False) == [4, 2, 5, 1, 3]
assert tree.preorder_traversal(write=False) == [1, 2, 4, 5, 3]
    assert tree.postorder_traversal(write=False) == [4, 5, 2, 3, 1]
assert tree.levelorder_traversal(write=False) == [1, 2, 3, 4, 5]
if __name__ == '__main__':
main()
rohankapoorcom/vaddio_conferenceshot | custom_components/vaddio_conferenceshot/const.py | 71744710df10f77e21e9e7568e3f6c7175b0d11d
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PATH, CONF_USERNAME
DOMAIN = "vaddio_conferenceshot"
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
SERVICE_RECALL_PRESET = "move_to_preset"
ATTR_PRESET_ID = "preset"
lightonai/lightonml | lightonml/opu.py | 451327cccecdca4e8ec65df30f30d3fd8ad2194f
# Copyright (c) 2020 LightOn, All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
"""
This module contains the OPU class
"""
import time
from math import sqrt
import pkg_resources
from lightonml.encoding.base import NoEncoding, NoDecoding
import warnings
from typing import Optional, Union, Tuple, TYPE_CHECKING
import numpy as np
from contextlib import ExitStack
import attr
import inspect
import lightonml
from lightonml.internal.config import get_host_option, opu_version
from lightonml.internal import config, output_roi, utils, types
from lightonml.internal.user_input import OpuUserInput, InputTraits
from lightonml.internal.simulated_device import SimulatedOpuDevice
from lightonml.context import ContextArray
from lightonml.internal.settings import OpuSettings, TransformSettings
from lightonml.internal.runner import TransformRunner, FitTransformRunner
from lightonml.internal.types import InputRoiStrategy, IntOrTuple, TransformOutput, AcqState
from lightonml.types import OutputRescaling
# Import lightonopu only for typechecking, as it's an optional module and may not be present
if TYPE_CHECKING:
from lightonopu.internal.device import OpuDevice
# noinspection PyPep8Naming
class OPU:
"""Interface to the OPU.
.. math:: \\mathbf{y} = \\lvert \\mathbf{R} \\mathbf{x} \\rvert^2 \\mbox{ (non-linear transform, the default)}
.. math:: \\mathbf{y} = \\mathbf{R}\\mathbf{x} \\mbox{ (linear transform)}
Main methods are `transform`, `linear_transform`, `fit1d` and `fit2d`,
and accept NumPy arrays or PyTorch tensors.
The non-linear transform (`transform`) is a native operation for the OPU, and performs at a higher
speed than `linear_transform`.
Acquiring/releasing hardware device resources is done by open/close and a
context-manager interface.
Unless `open_at_init=False`, these resources are acquired automatically at init.
If another process or kernel has not released the resources, an error will be
raised, call `close()` or shutdown the kernel on the OPU object to release it.
Parameters
----------
n_components : int,
dimensionality of the target projection space.
opu_device : OpuDevice or SimulatedOpuDevice, optional
optical processing unit instance linked to a physical or simulated device.
If not provided, a device is properly instantiated.
If opu_device is of type SimulatedOpuDevice, the random matrix is generated
at __init__, using max_n_features and n_components
max_n_features: int, optional
maximum number of binary features that the OPU will transform
used only if opu_device is a SimulatedOpuDevice,
in order to initiate the random matrix
config_file : str, optional
path to the configuration file (for dev purpose)
config_override: dict, optional
for override of the config_file (for dev purpose)
verbose_level: int, optional
deprecated, use lightonml.set_verbose_level() instead
.. seealso:: `lightonml.set_verbose_level`
input_roi_strategy: types.InputRoiStrategy, optional
describes how to display the features on the input device
.. seealso:: `lightonml.internal.types.InputRoiStrategy`
open_at_init: bool, optional
forces the setting of acquiring hardware resource at init. If
not provided, follow system's setting (usually True)
disable_pbar: bool, optional
disable display of the progress bar when verbose_level is set to 1
simulated: bool, optional
performs the random projection using CPU, in case no OPU is available on your machine
the random matrix is then generated at __init__, using max_n_features and n_components
rescale: types.OutputRescaling, optional,
output rescaling method for `linear_transform`.
Ignored by `transform`.
.. seealso:: `lightonml.types.OutputRescaling`
Attributes
----------
n_components: int
dimensionality of the target projection space.
rescale: types.OutputRescaling,
output rescaling method for `linear_transform`.
Ignored by `transform`.
max_n_features: int
maximum number of binary features that the OPU will transform
writeable only if opu_device is a SimulatedOpuDevice,
in order to initiate or resize the random matrix
device: OpuDevice or SimulatedOpuDevice
underlying hardware that performs transformation (read-only)
input_roi_strategy: types.InputRoiStrategy, optional
describes how to display the features on the input device
"""
def __init__(self, n_components: int = 200000,
opu_device: Optional[Union["OpuDevice", SimulatedOpuDevice]] = None,
max_n_features: int = 1000, config_file: str = "",
config_override: dict = None, verbose_level: int = -1,
input_roi_strategy: types.InputRoiStrategy = types.InputRoiStrategy.full,
open_at_init: bool = None, disable_pbar=False, simulated=False,
rescale: Union[OutputRescaling, str] = OutputRescaling.variance):
self.__opu_config = None
self.__config_file = config_file
self.__config_override = config_override
self._max_n_features = max_n_features
self.disable_pbar = disable_pbar
self.rescale = rescale
# Get trace and print functions
if verbose_level != -1:
warnings.warn("Verbose level arg will removed in 1.3, "
"Use lightonml.set_verbose_level instead",
DeprecationWarning)
lightonml.set_verbose_level(verbose_level)
else:
verbose_level = lightonml.get_verbose_level()
self._debug = lightonml.get_debug_fn()
self._trace = lightonml.get_trace_fn()
self._print = lightonml.get_print_fn()
no_config_msg = "No configuration files for the OPU was found on this machine.\n" \
"You may want to run the OPU in a simulated manner, by passing the " \
"simulated argument to True at init.\n" \
"See https://docs.lighton.ai/notes/get_started.html#Simulating-an-OPU " \
"for more details.\n" \
"See also https://lighton.ai/products for getting access to our technology."
if simulated and opu_device is not None:
raise ValueError("simulated and opu_device arguments are conflicting")
# Device init, or take the one passed as input
if opu_device:
if type(opu_device).__name__ not in ["SimulatedOpuDevice", "OpuDevice"]:
raise TypeError("opu_device must be of type SimulatedOpuDevice or OpuDevice")
self.device = opu_device
elif simulated:
self.device = SimulatedOpuDevice()
else:
# Instantiate device directly
from lightonopu.internal.device import OpuDevice
if not self.__config_file and not config.host_has_opu_config():
# Looks like there's no OPU on this host as we didn't find configuration files
raise RuntimeError(no_config_msg)
opu_type = self.config["type"]
frametime_us = self.config["input"]["frametime_us"]
exposure_us = self.config["output"]["exposure_us"]
seq_nb_prelim = self.config.get("sequence_nb_prelim", 0)
name = self.config["name"]
self.device = OpuDevice(opu_type, frametime_us, exposure_us, seq_nb_prelim,
None, verbose_level, name)
self._base_frametime_us = self.device.frametime_us
self._base_exposure_us = self.device.exposure_us
if self._s.simulated:
# build the random matrix if not done already
self._resize_rnd_matrix(max_n_features, n_components)
else:
# Make sure lightonopu is at 1.4.1 or later, needed for linear_reconstruction
pkg_resources.require("lightonopu>=1.4.1")
# initialize linear_reconstruction library
from lightonopu import linear_reconstruction
linear_reconstruction.init(np.prod(self.device.input_shape))
self._output_roi = output_roi.OutputRoi(self.device.output_shape_max,
self.device.output_roi_strategy,
self._s.allowed_roi, self._s.min_n_components)
# This also sets the output ROI
self.n_components = n_components
self.input_roi_strategy = input_roi_strategy
# Runner initialized when entering fit
self._runner = None # type: Optional[TransformRunner]
# ExitStack for device acquisition, initialized when entering fit
self._acq_stack = ExitStack()
self._trace("OPU initialized")
# Open at init, unless relevant host.json option is False
if open_at_init is None:
open_at_init = get_host_option("lightonml_open_at_init", True)
if open_at_init:
self.open()
def _tr_settings(self, no_input=False, **override) -> TransformSettings:
"""Returns transform settings for feeding to TransformRunner"""
init = TransformSettings(self.input_roi_strategy, self.n_components)
settings = attr.evolve(init, **override)
if no_input and self.input_roi_strategy is InputRoiStrategy.auto:
# If no input_roi, replace auto by full strategy
settings.input_roi_strategy = InputRoiStrategy.full
assert settings.input_roi is None
return settings
def fit1d(self, X=None, n_features: int = None,
packed: bool = False, online=False, **override):
"""
Configure OPU transform for 1d vectors
The function can be either called with input vector, for fitting OPU
parameters to it, or just vector dimensions, with ``n_features``.
When input is bit-packed the packed flag must be set to True.
When input vectors must be transformed one by one, performance will
be improved with the online flag set to True.
Parameters
----------
X: np.ndarray or torch.Tensor
Fit will be made on this vector to optimize transform parameters
n_features: int
Number of features for the input, necessary if X parameter isn't provided
packed: bool
Set to true if the input vectors will be already bit-packed
online: bool, optional
Set to true if the transforms will be made one vector after the other
defaults to False
override: dict, optional
keyword args for overriding transform settings (advanced parameters)
"""
return self.__fit(X, n_features, packed, online, False, **override)
def fit2d(self, X=None, n_features: Tuple[int, int] = None,
packed: bool = False, online=False, **override):
"""
Configure OPU transform for 2d vectors
The function can be either called with input vector, for fitting OPU
parameters to it, or just vector dimensions, with `n_features`.
When input is bit-packed the packed flag must be set to True.
Number of features must be then provided with `n_features`
When input vectors must be transformed one by one, performance will
be improved with the online flag set to True.
Parameters
----------
X: np.ndarray or torch.Tensor
a 2d input vector, or batch of 2d input_vectors, binary encoded, packed or not
n_features: tuple(int)
Number of features for the input, necessary if X parameter isn't provided, or
if input is bit-packed
packed: bool, optional
whether the input data is in bit-packed representation
if True, each input vector is assumed to be a 1d array, and the "real" number
of features must be provided as n_features
defaults to False
online: bool, optional
Set to true if the transforms will be made one vector after the other
defaults to False
override: dict, optional
keyword args for overriding transform settings (advanced parameters)
"""
return self.__fit(X, n_features, packed, online, True, **override)
def transform(self, X, encoder_cls=NoEncoding, decoder_cls=NoDecoding) -> TransformOutput:
"""
Performs the nonlinear random projections of one or several input vectors.
The `fit1d` or `fit2d` method must be called before, for setting vector dimensions
or online option.
If you need to transform one vector after each other, add `online=True` in the fit function.
Parameters
----------
X: np.ndarray or torch.Tensor
input vector, or batch of input vectors.
Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`.
        encoder_cls: encoding.base.BaseTransformer, optional
            class or instance of a class that transforms the input into binary vectors to be processed by the opu.
decoder_cls: encoder.base.BaseTransformer, optional
class or instance of class that transforms the output of the opu back into the appropriate format.
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
assert self._runner, "Call fit1d or fit2d before transform"
assert self.device.active, "OPU device isn't active, use opu.open() or \"with opu:\""
if inspect.isclass(encoder_cls):
encoder = encoder_cls()
else:
encoder = encoder_cls
X_enc = encoder.transform(X)
user_input = OpuUserInput.from_traits(X_enc, self._runner.traits)
self._debug(str(user_input))
if user_input.is_batch and not self._s.simulated:
# With batch input start acquisition first
assert self.device.acq_state.value != AcqState.online.value, \
"Can't transform a batch of vectors when acquisition is" \
" in online mode, only single vectors"
with self.device.acquiring(n_images=self._s.n_samples_by_pass):
out = self._runner.transform(user_input)
else:
out = self._runner.transform(user_input)
return self._post_transform(out, user_input, encoder, decoder_cls)
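    # Minimal usage sketch (simulated OPU, hypothetical sizes):
    #   opu = OPU(n_components=10000, simulated=True, max_n_features=1000)
    #   opu.fit1d(n_features=1000)
    #   y = opu.transform(x)  # x: binary vector(s) with 1000 features each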
def linear_transform(self, X, encoder_cls=NoEncoding, decoder_cls=NoDecoding) -> TransformOutput:
"""
Do a linear transform of X, for Nitro (non-linear) photonic cores.
Parameters
----------
X: np.ndarray or torch.Tensor
input vector, or batch of input vectors.
Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`.
encoder_cls: encoding.base.BaseTransformer, optional
            class or instance of a class that transforms the input into binary vectors to be processed by the opu.
decoder_cls: encoding.base.BaseTransformer, optional
class or instance of class that transforms the output of the opu back into the appropriate format.
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
assert self._runner, "Call fit1d or fit2d before linear_transform"
traits = self._runner.traits
if traits.packed:
# TODO implement for packed
raise RuntimeError("Linear transform isn't yet implemented for packed input :/")
if inspect.isclass(encoder_cls):
encoder = encoder_cls()
else:
encoder = encoder_cls
X_enc = encoder.transform(X)
user_input = OpuUserInput.from_traits(X_enc, traits)
_, result_ctx = self._raw_linear_transform(X_enc, traits, user_input)
# Decoding, add context, and optional convert back to torch if needed
output = self._post_transform(result_ctx, user_input, encoder, decoder_cls)
# Rescale the output, intentionally after the decoding step
if self.rescale is OutputRescaling.variance:
n_features = user_input.n_features_s
output = output / (self._s.stdev * sqrt(n_features))
elif self.rescale is OutputRescaling.norm:
output = output / (self._s.stdev * sqrt(self.n_components))
return output
def transform1d(self, *args, **kwargs):
raise RuntimeError("transform1d is deprecated, you must now use fit1d and transform")
def transform2d(self, *args, **kwargs):
raise RuntimeError("transform2d is deprecated, you must now use fit2d and transform")
def fit_transform1d(self, X, packed: bool = False,
**override) -> ContextArray:
"""Performs the nonlinear random projections of 1d input vector(s).
This function is the one-liner equivalent of `fit1d` and `transform` calls.
.. warning:: when making several transform calls, prefer calling `fit1d`
and then `transform`, or you might encounter an inconsistency in the
transformation matrix.
The input data can be bit-packed, where ``n_features = 8*X.shape[-1]``
Otherwise ``n_features = X.shape[-1]``
If tqdm module is available, it is used for progress display
Parameters
----------
X: np.ndarray or torch.Tensor
a 1d input vector, or batch of 1d input_vectors, binary encoded, packed or not
batch can be 1d or 2d. In all cases ``output.shape[:-1] = X.shape[:-1]``
packed: bool, optional
whether the input data is in bit-packed representation
defaults to False
override: keyword args for overriding transform settings (advanced parameters)
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
self.fit1d(X, None, packed, False, **override)
return self.transform(X)
def fit_transform2d(self, X, packed: bool = False, n_2d_features=None,
**override) -> ContextArray:
"""Performs the nonlinear random projections of 2d input vector(s).
This function is the one-liner equivalent of `fit2d` and `transform` calls.
.. warning:: when making several transform calls, prefer calling `fit2d`
and then `transform`, or you might encounter an inconsistency in the
transformation matrix.
If tqdm module is available, it is used for progress display
Parameters
----------
X: np.ndarray or torch.Tensor
a 2d input vector, or batch of 2d input_vectors, binary encoded, packed or not
packed: bool, optional
whether the input data is in bit-packed representation
if True, each input vector is assumed to be a 1d array, and the "real" number
of features must be provided as n_2d_features
defaults to False
n_2d_features: list, tuple or np.ndarray of length 2
If the input is bit-packed, specifies the shape of each input vector.
Not needed if the input isn't bit-packed.
override: keyword args for overriding transform settings (advanced parameters)
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
self.fit2d(X, n_2d_features, packed, False, **override)
return self.transform(X)
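# Note: both one-liners above re-run the fit on every call, so repeated calls
# may use different transform settings; for several projections with the same
# settings, prefer one fit1d/fit2d followed by multiple transform calls.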
def __fit(self, X, n_features: IntOrTuple,
packed: bool, online: bool, is_2d_features: bool,
**override):
"""Internal working of the fitXd calls
Instantiates a TransformRunner, and start online acq if needs be.
"""
if X is not None:
# Input is provided, do the fit with user input
user_input = OpuUserInput.from_input(X, packed, is_2d_features, n_features)
tr_settings = self._tr_settings(no_input=False, **override)
self._runner = FitTransformRunner(self._s, tr_settings, user_input,
device=self.device,
disable_pbar=self.disable_pbar)
else:
# Only dimensions are provided, no fitting happens on input
assert n_features, "either input vector or n_features must be specified"
# tr_settings has no input_roi, since it uses X to compute it
tr_settings = self._tr_settings(no_input=True, **override)
traits = InputTraits(n_features, packed)
self._runner = TransformRunner(self._s, tr_settings, traits,
device=self.device,
disable_pbar=self.disable_pbar)
self._acq_stack.close()
if online:
if self._s.no_single_transform:
raise RuntimeError("Online transform isn't available with this OPU")
# Start acquisition only if online. Batch transform start their own.
self._acq_stack.enter_context(self.device.acquiring(online=True))
@staticmethod
def _post_transform(output, user_input, encoder, decoder_cls):
"""Final steps after transform
1. reshape
2. decode the output
3. convert to tensor if user input was tensor
"""
output = user_input.reshape_output(output)
# If the encoder has a get_params method, its parameters are passed to the decoder's init
if inspect.isclass(decoder_cls):
if hasattr(encoder, "get_params"):
decoder = decoder_cls(**encoder.get_params())
else:
decoder = decoder_cls()
else:
decoder = decoder_cls
output = decoder.transform(output)
if user_input.is_tensor:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import torch
return torch.from_numpy(output)
else:
return output
def _raw_linear_transform(self, X, traits=None, user_input=None):
"""
Do linear_transform of X, and return both raw OPU output and decoded output in a tuple
"""
if traits is None:
assert self._runner, "Call fit1d or fit2d before linear_transform"
traits = self._runner.traits
if user_input is None:
user_input = OpuUserInput.from_traits(X, traits)
if self._s.simulated:
prepared_X = X
else:
assert self.device.acq_state.value != AcqState.online.value, \
"Can't do linear transform when acquisition is" \
" in online mode, only single vectors"
assert self._runner.t.input_roi_strategy == InputRoiStrategy.full, \
"ROI strategy must be full for linear_transform to be correct.\n" \
"Set input_roi_strategy attribute to InputRoiStrategy.full."
# X2 is now numpy 2D, whatever the initial shape and the type (torch or numpy)
X2 = user_input.reshape_input(raveled_features=True, leave_single_dim=True)
try:
import lightonopu.linear_reconstruction as reconstruction
except ImportError:
raise RuntimeError("Need a lightonopu version with linear_reconstruction module")
start = time.time()
prepared_X = reconstruction.encode_batch(X2)
self._trace(f"Encoding time {time.time() - start} s")
# Restore the dimension after batch encoding to something suitable for formatting
prepared_X = user_input.unravel_features(prepared_X)
# Run the OPU transform
prepared_input = OpuUserInput.from_traits(prepared_X, traits)
start = time.time()
with self.device.acquiring(n_images=self._s.n_samples_by_pass):
rp_opu = self._runner.transform(prepared_input, linear=True)
self._trace(f"Transform time {time.time() - start} s")
if self._s.simulated:
result_ctx = rp_opu
else:
# Decoding forgets about the context, re-add it to result afterwards
start = time.time()
result = reconstruction.decode_batch(rp_opu)
self._trace(f"Decoding time {time.time() - start} s")
result_ctx = ContextArray(result, rp_opu.context)
return rp_opu, result_ctx
def __enter__(self):
"""Context manager interface that acquires hardware resources
used by the OPU device."""
self.__active_before_enter = self.device.active
self.open()
return self
def __exit__(self, *args):
# Don't close if OPU was already active
if not self.__active_before_enter:
self.close()
def open(self):
"""Acquires hardware resources used by the OPU device
.. seealso:: `close()` or use the context manager interface for
closing at the end of an indent block
"""
if self.device.active:
return
self.device.open()
# initial reservation for giving batch transforms a buffer ready to use
self.device.reserve(self._s.n_samples_by_pass)
if self._s.detect_trigger:
# Detect trigger issue, and take action if needed
issue = utils.detect_trigger_issue(self.device)
if issue:
# noinspection PyProtectedMember,PyUnresolvedReferences
self.device._OpuDevice__opu.nb_prelim = 1
self._debug("trigger issue detected, workaround applied")
else:
self._debug("trigger issue not detected")
self._debug("OPU opened")
def close(self):
"""Releases hardware resources used by the OPU device"""
self._acq_stack.close()
self.device.close()
self._debug("OPU closed")
@property
def config(self):
"""Returns the internal configuration object"""
# Load it when asked first time
if not self.__opu_config:
self.__opu_config = config.load_config(self.__config_file, self._trace)
if self.__config_override is not None:
utils.recurse_update(self.__opu_config, self.__config_override)
return self.__opu_config
@property
def rescale(self):
return self._rescale
@rescale.setter
def rescale(self, value):
# If str it's the enum value
if isinstance(value, str):
self._rescale = OutputRescaling[value.lower()]
else:
assert isinstance(value, OutputRescaling)
self._rescale = value
@property
def max_n_components(self):
return self._output_roi.max_components
@property
def n_components(self) -> int:
return self._n_components
@n_components.setter
def n_components(self, value: int):
if self._s.simulated:
self._resize_rnd_matrix(self.max_n_features, value)
else:
self.device.output_roi = self._output_roi.compute_roi(value)
# We used to call device.reserve here, but moved to device.acquiring()
self._n_components = value
@property
def max_n_features(self) -> int:
return self._s.max_n_features
@max_n_features.setter
def max_n_features(self, value: int):
if not self._s.simulated:
raise AttributeError("max_n_feature can't be set if device is real")
self._resize_rnd_matrix(value, self._n_components)
self._max_n_features = value
@property
def _s(self) -> OpuSettings:
"""Returns immutable settings associated with the OPU
Settings are immutable (attrs frozen), so they are regenerated on
each call. Performance impact is negligible"""
# Get default value
pass_default = attr.fields(OpuSettings).n_samples_by_pass.default
# Common settings to both simulated and base
kwargs = {"input_shape": self.device.input_shape,
"output_max_shape": self.device.output_shape_max,
"frametime_us": self._base_frametime_us,
"exposure_us": self._base_exposure_us}
if isinstance(self.device, SimulatedOpuDevice):
# Notice we never query self.config here, in order not to
# need a configuration file for simulated device
return OpuSettings(max_n_features=self._max_n_features,
n_samples_by_pass=pass_default,
simulated=True, **kwargs
)
return OpuSettings(
max_n_features=int(np.prod(self.device.input_shape)),
# Will use defaults of OpuSettings if not found
n_samples_by_pass=self.config.get("n_samples_by_pass", pass_default),
min_batch_size=self.config["input"].get("minimum_batch_size", 0),
allowed_roi=self.config["output"].get("allowed_roi"),
# min_n_components is linked to the minimum output size
min_n_components=self.config["output"].get("minimum_output_size", 0),
ones_range=self.config["ones_range"],
n_tries=self.config.get("n_transform_tries", 5),
detect_trigger=self.config.get("detect_trigger_issue", False),
no_single_transform=self.config.get("no_single_transform", False),
stdev=self.config["output"].get("stdev", 1.),
**kwargs)
def _resize_rnd_matrix(self, n_features: int, n_components: int):
"""Resize device's random matrix"""
assert isinstance(self.device, SimulatedOpuDevice)
rnd_mat = self.device.random_matrix
if rnd_mat is None or rnd_mat.shape != (n_features, n_components):
self._print("OPU: computing the random matrix... ", end='', flush=True)
self.device.build_random_matrix(n_features, n_components)
self._print("OK")
def version(self, devices=False):
"""Returns a multi-line string containing name and versions of the OPU"""
version = []
# Build OPU name
if not self._s.simulated:
version.append(opu_version(self.__opu_config))
# module version
version.append(f"lightonml version {lightonml.__version__}")
try:
# noinspection PyUnresolvedReferences
import lightonopu
version.append(f"lightonopu version {lightonopu.__version__}")
except ImportError:
pass
if devices:
version.append(self.device.versions())
return '\n'.join(version)
def __getstate__(self):
state = self.__dict__.copy()
# Remove logging functions, they can't be pickled
state.pop("_debug")
state.pop("_trace")
state.pop("_print")
# acq stack can't be pickled, will be restored
state.pop("_acq_stack")
# If acquisition is ongoing, close it
if not self._s.simulated:
state["__online_acq"] = self.device.acq_state.value == AcqState.online.value
self._acq_stack.close()
# Device itself is closed on pickling
return state
def __setstate__(self, state):
self.__dict__.update(state)
# Restore logging functions removed at getstate
self._debug = lightonml.get_debug_fn()
self._trace = lightonml.get_trace_fn()
self._print = lightonml.get_print_fn()
self._acq_stack = ExitStack()
# Restore online acquisition if it was the case
if state.get("__online_acq", False):
self._acq_stack.enter_context(self.device.acquiring(online=True))
| [((132, 22, 132, 46), 'lightonml.get_debug_fn', 'lightonml.get_debug_fn', ({}, {}), '()', False, 'import lightonml\n'), ((133, 22, 133, 46), 'lightonml.get_trace_fn', 'lightonml.get_trace_fn', ({}, {}), '()', False, 'import lightonml\n'), ((134, 22, 134, 46), 'lightonml.get_print_fn', 'lightonml.get_print_fn', ({}, {}), '()', False, 'import lightonml\n'), ((188, 26, 188, 37), 'contextlib.ExitStack', 'ExitStack', ({}, {}), '()', False, 'from contextlib import ExitStack\n'), ((199, 15, 199, 76), 'lightonml.internal.settings.TransformSettings', 'TransformSettings', ({(199, 33, 199, 56): 'self.input_roi_strategy', (199, 58, 199, 75): 'self.n_components'}, {}), '(self.input_roi_strategy, self.n_components)', False, 'from lightonml.internal.settings import OpuSettings, TransformSettings\n'), ((200, 19, 200, 48), 'attr.evolve', 'attr.evolve', ({(200, 31, 200, 35): 'init'}, {}), '(init, **override)', False, 'import attr\n'), ((299, 11, 299, 39), 'inspect.isclass', 'inspect.isclass', ({(299, 27, 299, 38): 'encoder_cls'}, {}), '(encoder_cls)', False, 'import inspect\n'), ((306, 21, 306, 73), 'lightonml.internal.user_input.OpuUserInput.from_traits', 'OpuUserInput.from_traits', ({(306, 46, 306, 51): 'X_enc', (306, 53, 306, 72): 'self._runner.traits'}, {}), '(X_enc, self._runner.traits)', False, 'from lightonml.internal.user_input import OpuUserInput, InputTraits\n'), ((349, 11, 349, 39), 'inspect.isclass', 'inspect.isclass', ({(349, 27, 349, 38): 'encoder_cls'}, {}), '(encoder_cls)', False, 'import inspect\n'), ((356, 21, 356, 60), 'lightonml.internal.user_input.OpuUserInput.from_traits', 'OpuUserInput.from_traits', ({(356, 46, 356, 51): 'X_enc', (356, 53, 356, 59): 'traits'}, {}), '(X_enc, traits)', False, 'from lightonml.internal.user_input import OpuUserInput, InputTraits\n'), ((488, 11, 488, 39), 'inspect.isclass', 'inspect.isclass', ({(488, 27, 488, 38): 'decoder_cls'}, {}), '(decoder_cls)', False, 'import inspect\n'), ((540, 25, 540, 69), 'lightonml.internal.user_input.OpuUserInput.from_traits', 'OpuUserInput.from_traits', ({(540, 50, 540, 60): 'prepared_X', (540, 62, 540, 68): 'traits'}, {}), '(prepared_X, traits)', False, 'from lightonml.internal.user_input import OpuUserInput, InputTraits\n'), ((541, 16, 541, 27), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((734, 22, 734, 46), 'lightonml.get_debug_fn', 'lightonml.get_debug_fn', ({}, {}), '()', False, 'import lightonml\n'), ((735, 22, 735, 46), 'lightonml.get_trace_fn', 'lightonml.get_trace_fn', ({}, {}), '()', False, 'import lightonml\n'), ((736, 22, 736, 46), 'lightonml.get_print_fn', 'lightonml.get_print_fn', ({}, {}), '()', False, 'import lightonml\n'), ((737, 26, 737, 37), 'contextlib.ExitStack', 'ExitStack', ({}, {}), '()', False, 'from contextlib import ExitStack\n'), ((126, 12, 128, 45), 'warnings.warn', 'warnings.warn', ({(126, 26, 127, 67): '"""Verbose level arg will removed in 1.3, Use lightonml.set_verbose_level instead"""', (128, 26, 128, 44): 'DeprecationWarning'}, {}), "(\n 'Verbose level arg will removed in 1.3, Use lightonml.set_verbose_level instead'\n , DeprecationWarning)", False, 'import warnings\n'), ((129, 12, 129, 54), 'lightonml.set_verbose_level', 'lightonml.set_verbose_level', ({(129, 40, 129, 53): 'verbose_level'}, {}), '(verbose_level)', False, 'import lightonml\n'), ((131, 28, 131, 57), 'lightonml.get_verbose_level', 'lightonml.get_verbose_level', ({}, {}), '()', False, 'import lightonml\n'), ((174, 12, 174, 54), 'pkg_resources.require', 'pkg_resources.require', ({(174, 34, 174, 53): 
'"""lightonopu>=1.4.1"""'}, {}), "('lightonopu>=1.4.1')", False, 'import pkg_resources\n'), ((179, 31, 181, 98), 'lightonml.internal.output_roi.OutputRoi', 'output_roi.OutputRoi', ({(179, 52, 179, 80): 'self.device.output_shape_max', (180, 52, 180, 83): 'self.device.output_roi_strategy', (181, 52, 181, 71): 'self._s.allowed_roi', (181, 73, 181, 97): 'self._s.min_n_components'}, {}), '(self.device.output_shape_max, self.device.\n output_roi_strategy, self._s.allowed_roi, self._s.min_n_components)', False, 'from lightonml.internal import config, output_roi, utils, types\n'), ((193, 27, 193, 74), 'lightonml.internal.config.get_host_option', 'get_host_option', ({(193, 43, 193, 67): '"""lightonml_open_at_init"""', (193, 69, 193, 73): 'True'}, {}), "('lightonml_open_at_init', True)", False, 'from lightonml.internal.config import get_host_option, opu_version\n'), ((457, 25, 457, 87), 'lightonml.internal.user_input.OpuUserInput.from_input', 'OpuUserInput.from_input', ({(457, 49, 457, 50): 'X', (457, 52, 457, 58): 'packed', (457, 60, 457, 74): 'is_2d_features', (457, 76, 457, 86): 'n_features'}, {}), '(X, packed, is_2d_features, n_features)', False, 'from lightonml.internal.user_input import OpuUserInput, InputTraits\n'), ((459, 27, 461, 77), 'lightonml.internal.runner.FitTransformRunner', 'FitTransformRunner', (), '', False, 'from lightonml.internal.runner import TransformRunner, FitTransformRunner\n'), ((467, 21, 467, 52), 'lightonml.internal.user_input.InputTraits', 'InputTraits', ({(467, 33, 467, 43): 'n_features', (467, 45, 467, 51): 'packed'}, {}), '(n_features, packed)', False, 'from lightonml.internal.user_input import OpuUserInput, InputTraits\n'), ((468, 27, 470, 74), 'lightonml.internal.runner.TransformRunner', 'TransformRunner', (), '', False, 'from lightonml.internal.runner import TransformRunner, FitTransformRunner\n'), ((501, 19, 501, 43), 'torch.from_numpy', 'torch.from_numpy', ({(501, 36, 501, 42): 'output'}, {}), '(output)', False, 'import torch\n'), ((514, 25, 514, 60), 'lightonml.internal.user_input.OpuUserInput.from_traits', 'OpuUserInput.from_traits', ({(514, 50, 514, 51): 'X', (514, 53, 514, 59): 'traits'}, {}), '(X, traits)', False, 'from lightonml.internal.user_input import OpuUserInput, InputTraits\n'), ((534, 20, 534, 31), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((535, 25, 535, 56), 'lightonopu.linear_reconstruction.encode_batch', 'reconstruction.encode_batch', ({(535, 53, 535, 55): 'X2'}, {}), '(X2)', True, 'import lightonopu.linear_reconstruction as reconstruction\n'), ((550, 20, 550, 31), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((551, 21, 551, 56), 'lightonopu.linear_reconstruction.decode_batch', 'reconstruction.decode_batch', ({(551, 49, 551, 55): 'rp_opu'}, {}), '(rp_opu)', True, 'import lightonopu.linear_reconstruction as reconstruction\n'), ((554, 25, 554, 61), 'lightonml.context.ContextArray', 'ContextArray', ({(554, 38, 554, 44): 'result', (554, 46, 554, 60): 'rp_opu.context'}, {}), '(result, rp_opu.context)', False, 'from lightonml.context import ContextArray\n'), ((582, 20, 582, 59), 'lightonml.internal.utils.detect_trigger_issue', 'utils.detect_trigger_issue', ({(582, 47, 582, 58): 'self.device'}, {}), '(self.device)', False, 'from lightonml.internal import config, output_roi, utils, types\n'), ((603, 32, 603, 83), 'lightonml.internal.config.load_config', 'config.load_config', ({(603, 51, 603, 69): 'self.__config_file', (603, 71, 603, 82): 'self._trace'}, {}), '(self.__config_file, self._trace)', False, 
'from lightonml.internal import config, output_roi, utils, types\n'), ((667, 19, 670, 32), 'lightonml.internal.settings.OpuSettings', 'OpuSettings', (), '', False, 'from lightonml.internal.settings import OpuSettings, TransformSettings\n'), ((151, 26, 151, 46), 'lightonml.internal.simulated_device.SimulatedOpuDevice', 'SimulatedOpuDevice', ({}, {}), '()', False, 'from lightonml.internal.simulated_device import SimulatedOpuDevice\n'), ((163, 26, 164, 62), 'lightonopu.internal.device.OpuDevice', 'OpuDevice', ({(163, 36, 163, 44): 'opu_type', (163, 46, 163, 58): 'frametime_us', (163, 60, 163, 71): 'exposure_us', (163, 73, 163, 86): 'seq_nb_prelim', (164, 36, 164, 40): 'None', (164, 42, 164, 55): 'verbose_level', (164, 57, 164, 61): 'name'}, {}), '(opu_type, frametime_us, exposure_us, seq_nb_prelim, None,\n verbose_level, name)', False, 'from lightonopu.internal.device import OpuDevice\n'), ((177, 39, 177, 71), 'numpy.prod', 'np.prod', ({(177, 47, 177, 70): 'self.device.input_shape'}, {}), '(self.device.input_shape)', True, 'import numpy as np\n'), ((605, 16, 605, 79), 'lightonml.internal.utils.recurse_update', 'utils.recurse_update', ({(605, 37, 605, 54): 'self.__opu_config', (605, 56, 605, 78): 'self.__config_override'}, {}), '(self.__opu_config, self.__config_override)', False, 'from lightonml.internal import config, output_roi, utils, types\n'), ((656, 23, 656, 47), 'attr.fields', 'attr.fields', ({(656, 35, 656, 46): 'OpuSettings'}, {}), '(OpuSettings)', False, 'import attr\n'), ((702, 27, 702, 57), 'lightonml.internal.config.opu_version', 'opu_version', ({(702, 39, 702, 56): 'self.__opu_config'}, {}), '(self.__opu_config)', False, 'from lightonml.internal.config import get_host_option, opu_version\n'), ((363, 47, 363, 63), 'math.sqrt', 'sqrt', ({(363, 52, 363, 62): 'n_features'}, {}), '(n_features)', False, 'from math import sqrt\n'), ((673, 31, 673, 63), 'numpy.prod', 'np.prod', ({(673, 39, 673, 62): 'self.device.input_shape'}, {}), '(self.device.input_shape)', True, 'import numpy as np\n'), ((155, 46, 155, 74), 'lightonml.internal.config.host_has_opu_config', 'config.host_has_opu_config', ({}, {}), '()', False, 'from lightonml.internal import config, output_roi, utils, types\n'), ((365, 47, 365, 70), 'math.sqrt', 'sqrt', ({(365, 52, 365, 69): 'self.n_components'}, {}), '(self.n_components)', False, 'from math import sqrt\n'), ((544, 38, 544, 49), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((536, 41, 536, 52), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((552, 41, 552, 52), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
demiurgestudios/shovel | example/shovel/bar.py | 3db497164907d3765fae182959147d19064671c7 | from shovel import task
@task
def hello(name='Foo'):
'''Prints "Hello, " followed by the provided name.
Examples:
shovel bar.hello
shovel bar.hello --name=Erin
http://localhost:3000/bar.hello?Erin'''
print('Hello, %s' % name)
@task
def args(*args):
'''Echoes back all the args you give it.
This exists mostly to demonstrate the fact that shovel
is compatible with variable argument functions.
Examples:
shovel bar.args 1 2 3 4
http://localhost:3000/bar.args?1&2&3&4'''
for arg in args:
print('You said "%s"' % arg)
@task
def kwargs(**kwargs):
'''Echoes back all the kwargs you give it.
This exists mostly to demonstrate that shovel is
compatible with keyword argument functions.
Examples:
shovel bar.kwargs --foo=5 --bar 5 --howdy hey
http://localhost:3000/bar.kwargs?foo=5&bar=5&howdy=hey'''
for key, val in kwargs.items():
print('You said "%s" => "%s"' % (key, val)) | [] |
timgates42/trex-core | scripts/external_libs/scapy-2.4.3/scapy/config.py | efe94752fcb2d0734c83d4877afe92a3dbf8eccd | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
"""
Implementation of the configuration object.
"""
from __future__ import absolute_import
from __future__ import print_function
import functools
import os
import re
import time
import socket
import sys
from scapy import VERSION, base_classes
from scapy.consts import DARWIN, WINDOWS, LINUX, BSD, SOLARIS
from scapy.error import log_scapy, warning, ScapyInvalidPlatformException
from scapy.modules import six
from scapy.themes import NoTheme, apply_ipython_style
############
# Config #
############
class ConfClass(object):
def configure(self, cnf):
self.__dict__ = cnf.__dict__.copy()
def __repr__(self):
return str(self)
def __str__(self):
s = ""
keys = self.__class__.__dict__.copy()
keys.update(self.__dict__)
keys = sorted(keys)
for i in keys:
if i[0] != "_":
r = repr(getattr(self, i))
r = " ".join(r.split())
wlen = 76 - max(len(i), 10)
if len(r) > wlen:
r = r[:wlen - 3] + "..."
s += "%-10s = %s\n" % (i, r)
return s[:-1]
class Interceptor(object):
def __init__(self, name=None, default=None,
hook=None, args=None, kargs=None):
self.name = name
self.intname = "_intercepted_%s" % name
self.default = default
self.hook = hook
self.args = args if args is not None else []
self.kargs = kargs if kargs is not None else {}
def __get__(self, obj, typ=None):
if not hasattr(obj, self.intname):
setattr(obj, self.intname, self.default)
return getattr(obj, self.intname)
@staticmethod
def set_from_hook(obj, name, val):
int_name = "_intercepted_%s" % name
setattr(obj, int_name, val)
def __set__(self, obj, val):
setattr(obj, self.intname, val)
self.hook(self.name, val, *self.args, **self.kargs)
def _readonly(name):
default = Conf.__dict__[name].default
Interceptor.set_from_hook(conf, name, default)
raise ValueError("Read-only value !")
ReadOnlyAttribute = functools.partial(
Interceptor,
hook=(lambda name, *args, **kwargs: _readonly(name))
)
ReadOnlyAttribute.__doc__ = "Read-only class attribute"
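# Illustrative sketch (not part of scapy): an Interceptor-backed attribute
# runs its hook on every assignment, which is how Conf reacts to changes of
# settings such as use_pcap further down.
#   class _Demo:
#       verb = Interceptor("verb", default=0,
#                          hook=lambda name, val: print(name, "->", val))
#   _Demo().verb = 3   # prints "verb -> 3"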
class ProgPath(ConfClass):
universal_open = "open" if DARWIN else "xdg-open"
pdfreader = universal_open
psreader = universal_open
svgreader = universal_open
dot = "dot"
display = "display"
tcpdump = "tcpdump"
tcpreplay = "tcpreplay"
hexedit = "hexer"
tshark = "tshark"
wireshark = "wireshark"
ifconfig = "ifconfig"
class ConfigFieldList:
def __init__(self):
self.fields = set()
self.layers = set()
@staticmethod
def _is_field(f):
return hasattr(f, "owners")
def _recalc_layer_list(self):
self.layers = {owner for f in self.fields for owner in f.owners}
def add(self, *flds):
self.fields |= {f for f in flds if self._is_field(f)}
self._recalc_layer_list()
def remove(self, *flds):
self.fields -= set(flds)
self._recalc_layer_list()
def __contains__(self, elt):
if isinstance(elt, base_classes.Packet_metaclass):
return elt in self.layers
return elt in self.fields
def __repr__(self):
return "<%s [%s]>" % (self.__class__.__name__, " ".join(str(x) for x in self.fields)) # noqa: E501
class Emphasize(ConfigFieldList):
pass
class Resolve(ConfigFieldList):
pass
class Num2Layer:
def __init__(self):
self.num2layer = {}
self.layer2num = {}
def register(self, num, layer):
self.register_num2layer(num, layer)
self.register_layer2num(num, layer)
def register_num2layer(self, num, layer):
self.num2layer[num] = layer
def register_layer2num(self, num, layer):
self.layer2num[layer] = num
def __getitem__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return self.layer2num[item]
return self.num2layer[item]
def __contains__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return item in self.layer2num
return item in self.num2layer
def get(self, item, default=None):
return self[item] if item in self else default
def __repr__(self):
lst = []
for num, layer in six.iteritems(self.num2layer):
if layer in self.layer2num and self.layer2num[layer] == num:
dir = "<->"
else:
dir = " ->"
lst.append((num, "%#6x %s %-20s (%s)" % (num, dir, layer.__name__,
layer._name)))
for layer, num in six.iteritems(self.layer2num):
if num not in self.num2layer or self.num2layer[num] != layer:
lst.append((num, "%#6x <- %-20s (%s)" % (num, layer.__name__,
layer._name)))
lst.sort()
return "\n".join(y for x, y in lst)
class LayersList(list):
def __init__(self):
list.__init__(self)
self.ldict = {}
def __repr__(self):
return "\n".join("%-20s: %s" % (l.__name__, l.name) for l in self)
def register(self, layer):
self.append(layer)
if layer.__module__ not in self.ldict:
self.ldict[layer.__module__] = []
self.ldict[layer.__module__].append(layer)
def layers(self):
result = []
# This import may feel useless, but it is required for the eval below
import scapy # noqa: F401
for lay in self.ldict:
doc = eval(lay).__doc__
result.append((lay, doc.strip().split("\n")[0] if doc else lay))
return result
class CommandsList(list):
def __repr__(self):
s = []
for l in sorted(self, key=lambda x: x.__name__):
doc = l.__doc__.split("\n")[0] if l.__doc__ else "--"
s.append("%-20s: %s" % (l.__name__, doc))
return "\n".join(s)
def register(self, cmd):
self.append(cmd)
return cmd # return cmd so that method can be used as a decorator
def lsc():
"""Displays Scapy's default commands"""
print(repr(conf.commands))
class CacheInstance(dict, object):
__slots__ = ["timeout", "name", "_timetable", "__dict__"]
def __init__(self, name="noname", timeout=None):
self.timeout = timeout
self.name = name
self._timetable = {}
def flush(self):
self.__init__(name=self.name, timeout=self.timeout)
def __getitem__(self, item):
if item in self.__slots__:
return object.__getattribute__(self, item)
val = dict.__getitem__(self, item)
if self.timeout is not None:
t = self._timetable[item]
if time.time() - t > self.timeout:
raise KeyError(item)
return val
def get(self, item, default=None):
# overloading this method is needed to force the dict to go through
# the timetable check
try:
return self[item]
except KeyError:
return default
def __setitem__(self, item, v):
if item in self.__slots__:
return object.__setattr__(self, item, v)
self._timetable[item] = time.time()
dict.__setitem__(self, item, v)
def update(self, other):
for key, value in six.iteritems(other):
# We only update an element from `other` either if it does
# not exist in `self` or if the entry in `self` is older.
if key not in self or self._timetable[key] < other._timetable[key]:
dict.__setitem__(self, key, value)
self._timetable[key] = other._timetable[key]
def iteritems(self):
if self.timeout is None:
return six.iteritems(self.__dict__)
t0 = time.time()
return ((k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def iterkeys(self):
if self.timeout is None:
return six.iterkeys(self.__dict__)
t0 = time.time()
return (k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def __iter__(self):
return six.iterkeys(self.__dict__)
def itervalues(self):
if self.timeout is None:
return six.itervalues(self.__dict__)
t0 = time.time()
return (v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def items(self):
if self.timeout is None:
return dict.items(self)
t0 = time.time()
return [(k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def keys(self):
if self.timeout is None:
return dict.keys(self)
t0 = time.time()
return [k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def values(self):
if self.timeout is None:
return list(six.itervalues(self))
t0 = time.time()
return [v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def __len__(self):
if self.timeout is None:
return dict.__len__(self)
return len(self.keys())
def summary(self):
return "%s: %i valid items. Timeout=%rs" % (self.name, len(self), self.timeout) # noqa: E501
def __repr__(self):
s = []
if self:
mk = max(len(k) for k in six.iterkeys(self.__dict__))
fmt = "%%-%is %%s" % (mk + 1)
for item in six.iteritems(self.__dict__):
s.append(fmt % item)
return "\n".join(s)
class NetCache:
def __init__(self):
self._caches_list = []
def add_cache(self, cache):
self._caches_list.append(cache)
setattr(self, cache.name, cache)
def new_cache(self, name, timeout=None):
c = CacheInstance(name=name, timeout=timeout)
self.add_cache(c)
def __delattr__(self, attr):
raise AttributeError("Cannot delete attributes")
def update(self, other):
for co in other._caches_list:
if hasattr(self, co.name):
getattr(self, co.name).update(co)
else:
self.add_cache(co.copy())
def flush(self):
for c in self._caches_list:
c.flush()
def __repr__(self):
return "\n".join(c.summary() for c in self._caches_list)
def _version_checker(module, minver):
"""Checks that module has a higher version that minver.
params:
- module: a module to test
- minver: a tuple of versions
"""
# We could use LooseVersion, but distutils imports imp which is deprecated
version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?'
version_tags = re.match(version_regexp, module.__version__)
if not version_tags:
return False
version_tags = version_tags.group(1).split(".")
version_tags = tuple(int(x) for x in version_tags)
return version_tags >= minver
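# Illustrative examples (version strings assumed): with __version__ == "2.8"
# _version_checker(module, (1, 7)) returns True; with "1.5" it returns False.
# The optional leading letter in the regexp also tolerates tags like "v2.8".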
def isCryptographyValid():
"""
Check if the cryptography library is present, and if it is recent enough
for most usages in scapy (v1.7 or later).
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (1, 7))
def isCryptographyRecent():
"""
Check if the cryptography library is recent (2.0 and later)
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (2, 0))
def isCryptographyAdvanced():
"""
Check if the cryptography library is present, and if it supports X25519,
ChaCha20Poly1305 and such (v2.0 or later).
"""
try:
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey # noqa: E501
X25519PrivateKey.generate()
except Exception:
return False
else:
return True
def isPyPy():
"""Returns either scapy is running under PyPy or not"""
try:
import __pypy__ # noqa: F401
return True
except ImportError:
return False
def _prompt_changer(attr, val):
"""Change the current prompt theme"""
try:
sys.ps1 = conf.color_theme.prompt(conf.prompt)
except Exception:
pass
try:
apply_ipython_style(get_ipython())
except NameError:
pass
def _set_conf_sockets():
"""Populate the conf.L2Socket and conf.L3Socket
according to the various use_* parameters
"""
from scapy.main import _load
if conf.use_bpf and not BSD:
Interceptor.set_from_hook(conf, "use_bpf", False)
raise ScapyInvalidPlatformException("BSD-like (OSX, *BSD...) only !")
if not conf.use_pcap and SOLARIS:
Interceptor.set_from_hook(conf, "use_pcap", True)
raise ScapyInvalidPlatformException(
"Scapy only supports libpcap on Solaris !"
)
# we are already in an Interceptor hook, use Interceptor.set_from_hook
if conf.use_pcap or conf.use_dnet:
try:
from scapy.arch.pcapdnet import L2pcapListenSocket, L2pcapSocket, \
L3pcapSocket
except (OSError, ImportError):
warning("No libpcap provider available ! pcap won't be used")
Interceptor.set_from_hook(conf, "use_pcap", False)
else:
conf.L3socket = L3pcapSocket
conf.L3socket6 = functools.partial(L3pcapSocket, filter="ip6")
conf.L2socket = L2pcapSocket
conf.L2listen = L2pcapListenSocket
# Update globals
_load("scapy.arch.pcapdnet")
return
if conf.use_bpf:
from scapy.arch.bpf.supersocket import L2bpfListenSocket, \
L2bpfSocket, L3bpfSocket
conf.L3socket = L3bpfSocket
conf.L3socket6 = functools.partial(L3bpfSocket, filter="ip6")
conf.L2socket = L2bpfSocket
conf.L2listen = L2bpfListenSocket
# Update globals
_load("scapy.arch.bpf")
return
if LINUX:
from scapy.arch.linux import L3PacketSocket, L2Socket, L2ListenSocket
conf.L3socket = L3PacketSocket
conf.L3socket6 = functools.partial(L3PacketSocket, filter="ip6")
conf.L2socket = L2Socket
conf.L2listen = L2ListenSocket
# Update globals
_load("scapy.arch.linux")
return
if WINDOWS:
from scapy.arch.windows import _NotAvailableSocket
from scapy.arch.windows.native import L3WinSocket, L3WinSocket6
conf.L3socket = L3WinSocket
conf.L3socket6 = L3WinSocket6
conf.L2socket = _NotAvailableSocket
conf.L2listen = _NotAvailableSocket
# No need to update globals on Windows
return
from scapy.supersocket import L3RawSocket
from scapy.layers.inet6 import L3RawSocket6
conf.L3socket = L3RawSocket
conf.L3socket6 = L3RawSocket6
def _socket_changer(attr, val):
if not isinstance(val, bool):
raise TypeError("This argument should be a boolean")
dependencies = { # Things that will be turned off
"use_pcap": ["use_bpf"],
"use_bpf": ["use_pcap"],
}
restore = {k: getattr(conf, k) for k in dependencies}
del restore[attr] # This is handled directly by _set_conf_sockets
if val: # Only if True
for param in dependencies[attr]:
Interceptor.set_from_hook(conf, param, False)
try:
_set_conf_sockets()
except (ScapyInvalidPlatformException, ImportError) as e:
for key, value in restore.items():
Interceptor.set_from_hook(conf, key, value)
if isinstance(e, ScapyInvalidPlatformException):
raise
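# Note: this hook runs when e.g. `conf.use_pcap = True` is assigned; it first
# turns off the conflicting capture flag, re-selects the socket classes, and
# restores the previous flags if the platform rejects the new choice.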
def _loglevel_changer(attr, val):
"""Handle a change of conf.logLevel"""
log_scapy.setLevel(val)
class Conf(ConfClass):
"""This object contains the configuration of Scapy.
session : filename where the session will be saved
interactive_shell : can be "ipython", "python" or "auto". Default: Auto
stealth : if 1, prevents any unwanted packet to go out (ARP, DNS, ...)
checkIPID: if 0, doesn't check that IPID matches between IP sent and ICMP IP citation received # noqa: E501
if 1, checks that they either are equal or byte swapped equals (bug in some IP stacks) # noqa: E501
if 2, strictly checks that they are equals
checkIPsrc: if 1, checks IP src in IP and ICMP IP citation match (bug in some NAT stacks) # noqa: E501
checkIPinIP: if True, checks that IP-in-IP layers match. If False, do not
check IP layers that encapsulates another IP layer
check_TCPerror_seqack: if 1, also check that TCP seq and ack match the ones in ICMP citation # noqa: E501
iface : selects the default output interface for srp() and sendp() (default: "eth0") # noqa: E501
verb : level of verbosity, from 0 (almost mute) to 3 (verbose)
promisc : default mode for listening socket (to get answers if you spoof on a lan) # noqa: E501
sniff_promisc : default mode for sniff()
filter : bpf filter added to every sniffing socket to exclude traffic from analysis # noqa: E501
histfile : history file
padding : includes padding in disassembled packets
except_filter : BPF filter for packets to ignore
debug_match : when 1, store received packet that are not matched into debug.recv # noqa: E501
route : holds the Scapy routing table and provides methods to manipulate it
warning_threshold : how much time between warnings from the same place
ASN1_default_codec: Codec used by default for ASN1 objects
mib : holds MIB direct access dictionary
resolve : holds list of fields for which resolution should be done
noenum : holds list of enum fields for which conversion to string should NOT be done # noqa: E501
AS_resolver: choose the AS resolver class to use
extensions_paths: path or list of paths where extensions are to be looked for
contribs : a dict which can be used by contrib layers to store local configuration # noqa: E501
debug_tls: when 1, print some TLS session secrets when they are computed.
recv_poll_rate: how often to check for new packets. Defaults to 0.05s.
"""
version = ReadOnlyAttribute("version", VERSION)
session = ""
interactive = False
interactive_shell = ""
stealth = "not implemented"
iface = None
iface6 = None
layers = LayersList()
commands = CommandsList()
dot15d4_protocol = None # Used in dot15d4.py
logLevel = Interceptor("logLevel", log_scapy.level, _loglevel_changer)
checkIPID = False
checkIPsrc = True
checkIPaddr = True
checkIPinIP = True
check_TCPerror_seqack = False
verb = 2
prompt = Interceptor("prompt", ">>> ", _prompt_changer)
promisc = True
sniff_promisc = 1
raw_layer = None
raw_summary = False
default_l2 = None
l2types = Num2Layer()
l3types = Num2Layer()
L3socket = None
L3socket6 = None
L2socket = None
L2listen = None
BTsocket = None
USBsocket = None
min_pkt_size = 60
bufsize = 2**16
histfile = os.getenv('SCAPY_HISTFILE',
os.path.join(os.path.expanduser("~"),
".scapy_history"))
padding = 1
except_filter = ""
debug_match = False
debug_tls = False
wepkey = ""
cache_iflist = {}
route = None # Filed by route.py
route6 = None # Filed by route6.py
auto_fragment = True
debug_dissector = False
color_theme = Interceptor("color_theme", NoTheme(), _prompt_changer)
warning_threshold = 5
prog = ProgPath()
resolve = Resolve()
noenum = Resolve()
emph = Emphasize()
use_pypy = ReadOnlyAttribute("use_pypy", isPyPy())
use_pcap = Interceptor(
"use_pcap",
os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y"),
_socket_changer
)
# XXX use_dnet is deprecated
use_dnet = os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y")
use_bpf = Interceptor("use_bpf", False, _socket_changer)
use_npcap = False
ipv6_enabled = socket.has_ipv6
extensions_paths = "."
stats_classic_protocols = []
stats_dot11_protocols = []
temp_files = []
netcache = NetCache()
geoip_city = None
# can, tls, http are not loaded by default
load_layers = ['bluetooth', 'bluetooth4LE', 'dhcp', 'dhcp6', 'dns',
'dot11', 'dot15d4', 'eap', 'gprs', 'hsrp', 'inet',
'inet6', 'ipsec', 'ir', 'isakmp', 'l2', 'l2tp',
'llmnr', 'lltd', 'mgcp', 'mobileip', 'netbios',
'netflow', 'ntp', 'ppi', 'ppp', 'pptp', 'radius', 'rip',
'rtp', 'sctp', 'sixlowpan', 'skinny', 'smb', 'snmp',
'tftp', 'vrrp', 'vxlan', 'x509', 'zigbee']
contribs = dict()
crypto_valid = isCryptographyValid()
crypto_valid_recent = isCryptographyRecent()
crypto_valid_advanced = crypto_valid_recent and isCryptographyAdvanced()
fancy_prompt = True
auto_crop_tables = True
recv_poll_rate = 0.05
def __getattr__(self, attr):
# Those are loaded on runtime to avoid import loops
if attr == "manufdb":
from scapy.data import MANUFDB
return MANUFDB
if attr == "ethertypes":
from scapy.data import ETHER_TYPES
return ETHER_TYPES
if attr == "protocols":
from scapy.data import IP_PROTOS
return IP_PROTOS
if attr == "services_udp":
from scapy.data import UDP_SERVICES
return UDP_SERVICES
if attr == "services_tcp":
from scapy.data import TCP_SERVICES
return TCP_SERVICES
return object.__getattr__(self, attr)
if not Conf.ipv6_enabled:
log_scapy.warning("IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.") # noqa: E501
for m in ["inet6", "dhcp6"]:
if m in Conf.load_layers:
Conf.load_layers.remove(m)
conf = Conf()
def crypto_validator(func):
"""
This is a decorator to be used for any method relying on the cryptography library. # noqa: E501
Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'.
"""
def func_in(*args, **kwargs):
if not conf.crypto_valid:
raise ImportError("Cannot execute crypto-related method! "
"Please install python-cryptography v1.7 or later.") # noqa: E501
return func(*args, **kwargs)
return func_in
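# Illustrative usage sketch (hypothetical method, not from scapy):
#   class _Cipher:
#       @crypto_validator
#       def encrypt(self, data):
#           ...   # cryptography-dependent code is safe to run here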
| [((370, 19, 370, 63), 're.match', 're.match', ({(370, 28, 370, 42): 'version_regexp', (370, 44, 370, 62): 'module.__version__'}, {}), '(version_regexp, module.__version__)', False, 'import re\n'), ((522, 4, 522, 27), 'scapy.error.log_scapy.setLevel', 'log_scapy.setLevel', ({(522, 23, 522, 26): 'val'}, {}), '(val)', False, 'from scapy.error import log_scapy, warning, ScapyInvalidPlatformException\n'), ((664, 4, 664, 88), 'scapy.error.log_scapy.warning', 'log_scapy.warning', ({(664, 22, 664, 87): '"""IPv6 support disabled in Python. Cannot load Scapy IPv6 layers."""'}, {}), "(\n 'IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.')", False, 'from scapy.error import log_scapy, warning, ScapyInvalidPlatformException\n'), ((173, 26, 173, 55), 'scapy.modules.six.iteritems', 'six.iteritems', ({(173, 40, 173, 54): 'self.num2layer'}, {}), '(self.num2layer)', False, 'from scapy.modules import six\n'), ((180, 26, 180, 55), 'scapy.modules.six.iteritems', 'six.iteritems', ({(180, 40, 180, 54): 'self.layer2num'}, {}), '(self.layer2num)', False, 'from scapy.modules import six\n'), ((263, 32, 263, 43), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((267, 26, 267, 46), 'scapy.modules.six.iteritems', 'six.iteritems', ({(267, 40, 267, 45): 'other'}, {}), '(other)', False, 'from scapy.modules import six\n'), ((277, 13, 277, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((283, 13, 283, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((287, 15, 287, 42), 'scapy.modules.six.iterkeys', 'six.iterkeys', ({(287, 28, 287, 41): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((292, 13, 292, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((298, 13, 298, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((304, 13, 304, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((310, 13, 310, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((408, 8, 408, 35), 'cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate', 'X25519PrivateKey.generate', ({}, {}), '()', False, 'from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey\n'), ((443, 14, 443, 77), 'scapy.error.ScapyInvalidPlatformException', 'ScapyInvalidPlatformException', ({(443, 44, 443, 76): '"""BSD-like (OSX, *BSD...) only !"""'}, {}), "('BSD-like (OSX, *BSD...) 
only !')", False, 'from scapy.error import log_scapy, warning, ScapyInvalidPlatformException\n'), ((446, 14, 448, 9), 'scapy.error.ScapyInvalidPlatformException', 'ScapyInvalidPlatformException', ({(447, 12, 447, 54): '"""Scapy only supports libpcap on Solaris !"""'}, {}), "('Scapy only supports libpcap on Solaris !')", False, 'from scapy.error import log_scapy, warning, ScapyInvalidPlatformException\n'), ((469, 25, 469, 69), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((473, 8, 473, 31), 'scapy.main._load', '_load', ({(473, 14, 473, 30): '"""scapy.arch.bpf"""'}, {}), "('scapy.arch.bpf')", False, 'from scapy.main import _load\n'), ((478, 25, 478, 72), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((482, 8, 482, 33), 'scapy.main._load', '_load', ({(482, 14, 482, 32): '"""scapy.arch.linux"""'}, {}), "('scapy.arch.linux')", False, 'from scapy.main import _load\n'), ((604, 45, 604, 54), 'scapy.themes.NoTheme', 'NoTheme', ({}, {}), '()', False, 'from scapy.themes import NoTheme, apply_ipython_style\n'), ((276, 19, 276, 47), 'scapy.modules.six.iteritems', 'six.iteritems', ({(276, 33, 276, 46): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((282, 19, 282, 46), 'scapy.modules.six.iterkeys', 'six.iterkeys', ({(282, 32, 282, 45): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((291, 19, 291, 48), 'scapy.modules.six.itervalues', 'six.itervalues', ({(291, 34, 291, 47): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((326, 24, 326, 52), 'scapy.modules.six.iteritems', 'six.iteritems', ({(326, 38, 326, 51): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((459, 29, 459, 74), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((463, 12, 463, 40), 'scapy.main._load', '_load', ({(463, 18, 463, 39): '"""scapy.arch.pcapdnet"""'}, {}), "('scapy.arch.pcapdnet')", False, 'from scapy.main import _load\n'), ((592, 38, 592, 61), 'os.path.expanduser', 'os.path.expanduser', ({(592, 57, 592, 60): '"""~"""'}, {}), "('~')", False, 'import os\n'), ((278, 37, 278, 65), 'scapy.modules.six.iteritems', 'six.iteritems', ({(278, 51, 278, 64): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((284, 27, 284, 54), 'scapy.modules.six.iterkeys', 'six.iterkeys', ({(284, 40, 284, 53): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((293, 32, 293, 60), 'scapy.modules.six.iteritems', 'six.iteritems', ({(293, 46, 293, 59): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((299, 37, 299, 65), 'scapy.modules.six.iteritems', 'six.iteritems', ({(299, 51, 299, 64): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((305, 27, 305, 54), 'scapy.modules.six.iterkeys', 'six.iterkeys', ({(305, 40, 305, 53): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((309, 24, 309, 44), 'scapy.modules.six.itervalues', 'six.itervalues', ({(309, 39, 309, 43): 'self'}, {}), '(self)', False, 'from scapy.modules import six\n'), ((311, 32, 311, 60), 'scapy.modules.six.iteritems', 'six.iteritems', ({(311, 46, 311, 59): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((455, 12, 455, 73), 'scapy.error.warning', 'warning', ({(455, 20, 455, 72): '"""No libpcap provider available ! 
pcap won\'t be used"""'}, {}), '("No libpcap provider available ! pcap won\'t be used")', False, 'from scapy.error import log_scapy, warning, ScapyInvalidPlatformException\n'), ((248, 15, 248, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((617, 15, 617, 50), 'os.getenv', 'os.getenv', ({(617, 25, 617, 45): '"""SCAPY_USE_PCAPDNET"""', (617, 47, 617, 49): '""""""'}, {}), "('SCAPY_USE_PCAPDNET', '')", False, 'import os\n'), ((324, 37, 324, 64), 'scapy.modules.six.iterkeys', 'six.iterkeys', ({(324, 50, 324, 63): 'self.__dict__'}, {}), '(self.__dict__)', False, 'from scapy.modules import six\n'), ((613, 8, 613, 43), 'os.getenv', 'os.getenv', ({(613, 18, 613, 38): '"""SCAPY_USE_PCAPDNET"""', (613, 40, 613, 42): '""""""'}, {}), "('SCAPY_USE_PCAPDNET', '')", False, 'import os\n')] |
Prodigy123/rasa_nlu_zh | tests/base/test_server.py | b85717063a493f6b148504ee550a0642c6c379ae | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import tempfile
import pytest
import time
from treq.testing import StubTreq
from rasa_nlu.config import RasaNLUConfig
import json
import io
from tests import utilities
from tests.utilities import ResponseTest
from rasa_nlu.server import RasaNLU
@pytest.fixture(scope="module")
def app(tmpdir_factory):
"""
This fixture makes use of the IResource interface of the Klein application to mock the Rasa HTTP server.
:param tmpdir_factory: pytest fixture for creating temporary directories
:return: a StubTreq client wrapping the RasaNLU app resource
"""
_, nlu_log_file = tempfile.mkstemp(suffix="_rasa_nlu_logs.json")
_config = {
'write': nlu_log_file,
'port': -1, # unused in test app
"pipeline": "keyword",
"path": tmpdir_factory.mktemp("projects").strpath,
"server_model_dirs": {},
"data": "./data/demo-restaurants.json",
"emulate": "wit",
"max_training_processes": 1
}
config = RasaNLUConfig(cmdline_args=_config)
rasa = RasaNLU(config, testing=True)
return StubTreq(rasa.app.resource())
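# Note: StubTreq wraps the Klein resource so the tests below can issue
# in-memory HTTP requests against the app without binding a real port.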
@pytest.fixture
def rasa_default_train_data():
with io.open('data/examples/rasa/demo-rasa.json',
encoding='utf-8') as train_file:
return json.loads(train_file.read())
@pytest.inlineCallbacks
def test_root(app):
response = yield app.get("http://dummy_uri/")
content = yield response.text()
assert response.code == 200 and content.startswith("hello")
@pytest.inlineCallbacks
def test_status(app):
response = yield app.get("http://dummy_uri/status")
rjs = yield response.json()
assert response.code == 200 and "available_projects" in rjs
assert "default" in rjs["available_projects"]
@pytest.inlineCallbacks
def test_config(app):
response = yield app.get("http://dummy_uri/config")
assert response.code == 200
@pytest.inlineCallbacks
def test_version(app):
response = yield app.get("http://dummy_uri/version")
rjs = yield response.json()
assert response.code == 200 and "version" in rjs
@pytest.mark.parametrize("response_test", [
ResponseTest(
"http://dummy_uri/parse?q=hello",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}]
),
ResponseTest(
"http://dummy_uri/parse?query=hello",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}]
),
ResponseTest(
"http://dummy_uri/parse?q=hello ńöñàśçií",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello ńöñàśçií"}]
),
ResponseTest(
"http://dummy_uri/parse?q=",
[{"entities": {}, "confidence": 0.0, "intent": None, "_text": ""}]
),
])
@pytest.inlineCallbacks
def test_get_parse(app, response_test):
response = yield app.get(response_test.endpoint)
rjs = yield response.json()
assert response.code == 200
assert len(rjs) == 1
assert all(prop in rjs[0] for prop in ['entities', 'intent', '_text', 'confidence'])
@pytest.mark.parametrize("response_test", [
ResponseTest(
"http://dummy_uri/parse",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}],
payload={"q": "hello"}
),
ResponseTest(
"http://dummy_uri/parse",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}],
payload={"query": "hello"}
),
ResponseTest(
"http://dummy_uri/parse",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello ńöñàśçií"}],
payload={"q": "hello ńöñàśçií"}
),
])
@pytest.inlineCallbacks
def test_post_parse(app, response_test):
response = yield app.post(response_test.endpoint, data=json.dumps(response_test.payload),
content_type='application/json')
rjs = yield response.json()
assert response.code == 200
assert len(rjs) == 1
assert all(prop in rjs[0] for prop in ['entities', 'intent', '_text', 'confidence'])
@utilities.slowtest
@pytest.inlineCallbacks
def test_post_train(app, rasa_default_train_data):
response = app.post("http://dummy_uri/train", data=json.dumps(rasa_default_train_data),
content_type='application/json')
time.sleep(3)
app.flush()
response = yield response
rjs = yield response.json()
assert response.code == 404, "A project name to train must be specified"
assert "error" in rjs
@utilities.slowtest
@pytest.inlineCallbacks
def test_post_train_internal_error(app, rasa_default_train_data):
response = app.post("http://dummy_uri/train?project=test",
data=json.dumps({"data": "dummy_data_for_triggering_an_error"}),
content_type='application/json')
time.sleep(3)
app.flush()
response = yield response
rjs = yield response.json()
assert response.code == 500, "The training data format is not valid"
assert "error" in rjs
@pytest.inlineCallbacks
def test_model_hot_reloading(app, rasa_default_train_data):
query = "http://dummy_uri/parse?q=hello&project=my_keyword_model"
response = yield app.get(query)
assert response.code == 404, "Project should not exist yet"
train_u = "http://dummy_uri/train?project=my_keyword_model&pipeline=keyword"
response = app.post(train_u,
data=json.dumps(rasa_default_train_data),
content_type='application/json')
time.sleep(3)
app.flush()
response = yield response
assert response.code == 200, "Training should end successfully"
response = yield app.get(query)
assert response.code == 200, "Project should now exist after it got trained"
| [((23, 1, 23, 31), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((31, 22, 31, 68), 'tempfile.mkstemp', 'tempfile.mkstemp', (), '', False, 'import tempfile\n'), ((43, 13, 43, 48), 'rasa_nlu.config.RasaNLUConfig', 'RasaNLUConfig', (), '', False, 'from rasa_nlu.config import RasaNLUConfig\n'), ((44, 11, 44, 40), 'rasa_nlu.server.RasaNLU', 'RasaNLU', (), '', False, 'from rasa_nlu.server import RasaNLU\n'), ((142, 4, 142, 17), 'time.sleep', 'time.sleep', ({(142, 15, 142, 16): '(3)'}, {}), '(3)', False, 'import time\n'), ((156, 4, 156, 17), 'time.sleep', 'time.sleep', ({(156, 15, 156, 16): '(3)'}, {}), '(3)', False, 'import time\n'), ((173, 4, 173, 17), 'time.sleep', 'time.sleep', ({(173, 15, 173, 16): '(3)'}, {}), '(3)', False, 'import time\n'), ((50, 9, 51, 34), 'io.open', 'io.open', (), '', False, 'import io\n'), ((84, 4, 87, 5), 'tests.utilities.ResponseTest', 'ResponseTest', ({(85, 8, 85, 40): '"""http://dummy_uri/parse?q=hello"""', (86, 8, 86, 82): "[{'entities': {}, 'confidence': 1.0, 'intent': 'greet', '_text': 'hello'}]"}, {}), "('http://dummy_uri/parse?q=hello', [{'entities': {},\n 'confidence': 1.0, 'intent': 'greet', '_text': 'hello'}])", False, 'from tests.utilities import ResponseTest\n'), ((88, 4, 91, 5), 'tests.utilities.ResponseTest', 'ResponseTest', ({(89, 8, 89, 44): '"""http://dummy_uri/parse?query=hello"""', (90, 8, 90, 82): "[{'entities': {}, 'confidence': 1.0, 'intent': 'greet', '_text': 'hello'}]"}, {}), "('http://dummy_uri/parse?query=hello', [{'entities': {},\n 'confidence': 1.0, 'intent': 'greet', '_text': 'hello'}])", False, 'from tests.utilities import ResponseTest\n'), ((92, 4, 95, 5), 'tests.utilities.ResponseTest', 'ResponseTest', ({(93, 8, 93, 56): '"""http://dummy_uri/parse?q=hello ńöñàśçií"""', (94, 8, 94, 98): "[{'entities': {}, 'confidence': 1.0, 'intent': 'greet', '_text':\n 'hello ńöñàśçií'}]"}, {}), "('http://dummy_uri/parse?q=hello ńöñàśçií', [{'entities': {},\n 'confidence': 1.0, 'intent': 'greet', '_text': 'hello ńöñàśçií'}])", False, 'from tests.utilities import ResponseTest\n'), ((96, 4, 99, 5), 'tests.utilities.ResponseTest', 'ResponseTest', ({(97, 8, 97, 35): '"""http://dummy_uri/parse?q="""', (98, 8, 98, 74): "[{'entities': {}, 'confidence': 0.0, 'intent': None, '_text': ''}]"}, {}), "('http://dummy_uri/parse?q=', [{'entities': {}, 'confidence': \n 0.0, 'intent': None, '_text': ''}])", False, 'from tests.utilities import ResponseTest\n'), ((111, 4, 115, 5), 'tests.utilities.ResponseTest', 'ResponseTest', (), '', False, 'from tests.utilities import ResponseTest\n'), ((116, 4, 120, 5), 'tests.utilities.ResponseTest', 'ResponseTest', (), '', False, 'from tests.utilities import ResponseTest\n'), ((121, 4, 125, 5), 'tests.utilities.ResponseTest', 'ResponseTest', (), '', False, 'from tests.utilities import ResponseTest\n'), ((140, 55, 140, 90), 'json.dumps', 'json.dumps', ({(140, 66, 140, 89): 'rasa_default_train_data'}, {}), '(rasa_default_train_data)', False, 'import json\n'), ((154, 29, 154, 87), 'json.dumps', 'json.dumps', ({(154, 40, 154, 86): "{'data': 'dummy_data_for_triggering_an_error'}"}, {}), "({'data': 'dummy_data_for_triggering_an_error'})", False, 'import json\n'), ((171, 29, 171, 64), 'json.dumps', 'json.dumps', ({(171, 40, 171, 63): 'rasa_default_train_data'}, {}), '(rasa_default_train_data)', False, 'import json\n'), ((129, 59, 129, 92), 'json.dumps', 'json.dumps', ({(129, 70, 129, 91): 'response_test.payload'}, {}), '(response_test.payload)', False, 'import json\n')] |
ForroKulcs/bugsnag-python | bugsnag/configuration.py | 107c1add31a2202cc08ef944aa00ab96996b247a | import os
import platform
import socket
import sysconfig
from typing import List, Any, Tuple, Union
import warnings
from bugsnag.sessiontracker import SessionMiddleware
from bugsnag.middleware import DefaultMiddleware, MiddlewareStack
from bugsnag.utils import (fully_qualified_class_name, validate_str_setter,
validate_bool_setter, validate_iterable_setter,
validate_required_str_setter)
from bugsnag.delivery import (create_default_delivery, DEFAULT_ENDPOINT,
DEFAULT_SESSIONS_ENDPOINT)
from bugsnag.uwsgi import warn_if_running_uwsgi_without_threads
try:
from contextvars import ContextVar
_request_info = ContextVar('bugsnag-request', default=None) # type: ignore
except ImportError:
from bugsnag.utils import ThreadContextVar
_request_info = ThreadContextVar('bugsnag-request', default=None) # type: ignore # noqa: E501
__all__ = ('Configuration', 'RequestConfiguration')
class Configuration:
"""
Global app-level Bugsnag configuration settings.
"""
def __init__(self):
self.api_key = os.environ.get('BUGSNAG_API_KEY', None)
self.release_stage = os.environ.get("BUGSNAG_RELEASE_STAGE",
"production")
self.notify_release_stages = None
self.auto_notify = True
self.send_code = True
self.send_environment = False
self.asynchronous = True
self.delivery = create_default_delivery()
self.lib_root = sysconfig.get_path('purelib')
self.project_root = os.getcwd()
self.app_type = None
self.app_version = None
self.params_filters = ["password", "password_confirmation", "cookie",
"authorization"]
self.ignore_classes = [
"KeyboardInterrupt",
"django.http.Http404",
"django.http.response.Http404",
]
self.endpoint = DEFAULT_ENDPOINT
self.session_endpoint = DEFAULT_SESSIONS_ENDPOINT
self.auto_capture_sessions = True
self.traceback_exclude_modules = []
self.middleware = MiddlewareStack()
self.internal_middleware = MiddlewareStack()
self.internal_middleware.append(DefaultMiddleware)
self.internal_middleware.append(SessionMiddleware)
self.proxy_host = None
if not os.getenv("DYNO"):
self.hostname = socket.gethostname()
else:
self.hostname = None
self.runtime_versions = {"python": platform.python_version()}
def configure(self, api_key=None, app_type=None, app_version=None,
asynchronous=None, auto_notify=None,
auto_capture_sessions=None, delivery=None, endpoint=None,
hostname=None, ignore_classes=None, lib_root=None,
notify_release_stages=None, params_filters=None,
project_root=None, proxy_host=None, release_stage=None,
send_code=None, send_environment=None, session_endpoint=None,
traceback_exclude_modules=None):
"""
Validate and set configuration options. Will warn if an option is of an
incorrect type.
"""
if api_key is not None:
self.api_key = api_key
if app_type is not None:
self.app_type = app_type
if app_version is not None:
self.app_version = app_version
if asynchronous is not None:
self.asynchronous = asynchronous
if auto_notify is not None:
self.auto_notify = auto_notify
if auto_capture_sessions is not None:
self.auto_capture_sessions = auto_capture_sessions
if delivery is not None:
self.delivery = delivery
if endpoint is not None:
self.endpoint = endpoint
if hostname is not None:
self.hostname = hostname
if ignore_classes is not None:
self.ignore_classes = ignore_classes
if lib_root is not None:
self.lib_root = lib_root
if notify_release_stages is not None:
self.notify_release_stages = notify_release_stages
if params_filters is not None:
self.params_filters = params_filters
if project_root is not None:
self.project_root = project_root
if proxy_host is not None:
self.proxy_host = proxy_host
if release_stage is not None:
self.release_stage = release_stage
if send_code is not None:
self.send_code = send_code
if send_environment is not None:
self.send_environment = send_environment
if session_endpoint is not None:
self.session_endpoint = session_endpoint
if traceback_exclude_modules is not None:
self.traceback_exclude_modules = traceback_exclude_modules
return self
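    # Illustrative usage sketch (the values below are placeholders, not real
    # settings):
    #
    #   config = Configuration()
    #   config.configure(api_key='your-project-api-key',
    #                    release_stage='staging',
    #                    notify_release_stages=['production', 'staging'])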
def get(self, name):
"""
Get a single configuration option
"""
warnings.warn('Using get() to retrieve a Configuration property is ' +
'deprecated in favor of referencing properties directly',
DeprecationWarning)
return getattr(self, name)
@property
def api_key(self):
"""
Unique application identifier
"""
return self._api_key
@api_key.setter # type: ignore
@validate_required_str_setter
def api_key(self, value: str):
self._api_key = value
@property
def app_type(self):
"""
Category for the current application or task
"""
return self._app_type
@app_type.setter # type: ignore
@validate_str_setter
def app_type(self, value: str):
self._app_type = value
@property
def app_version(self):
"""
Release version of the current application
"""
return self._app_version
@app_version.setter # type: ignore
@validate_str_setter
def app_version(self, value: str):
self._app_version = value
@property
def asynchronous(self):
"""
If API requests should be sent asynchronously
"""
return self._asynchronous
@asynchronous.setter # type: ignore
@validate_bool_setter
def asynchronous(self, value: bool):
self._asynchronous = value
if value:
warn_if_running_uwsgi_without_threads()
@property
def auto_capture_sessions(self):
"""
If sessions should be automatically detected and delivered from web
request integrations
"""
return self._auto_capture_sessions
@auto_capture_sessions.setter # type: ignore
@validate_bool_setter
def auto_capture_sessions(self, value: bool):
self._auto_capture_sessions = value
@property
def auto_notify(self):
"""
If uncaught exceptions should be automatically captured and reported
"""
return self._auto_notify
@auto_notify.setter # type: ignore
@validate_bool_setter
def auto_notify(self, value: bool):
self._auto_notify = value
@property
def delivery(self):
"""
Transport mechanism used to make API requests. Implement the Delivery
interface to customize how requests are sent.
"""
return self._delivery
@delivery.setter # type: ignore
def delivery(self, value):
if hasattr(value, 'deliver') and callable(value.deliver):
self._delivery = value
else:
message = ('delivery should implement Delivery interface, got ' +
'{0}. This will be an error in a future release.')
warnings.warn(message.format(type(value).__name__), RuntimeWarning)
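    # Minimal sketch of a custom delivery, assuming the only contract checked
    # above is a callable deliver() attribute (class name is illustrative):
    #
    #   class LoggingDelivery:
    #       def deliver(self, config, payload):
    #           print('would send payload to', config.endpoint)
    #
    #   Configuration().configure(delivery=LoggingDelivery())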
@property
def endpoint(self):
"""
Event API endpoint. Set this property if using Bugsnag On-Premise.
>>> config = Configuration()
>>> config.endpoint = 'https://notify.bugsnag.example.co'
"""
return self._endpoint
@endpoint.setter # type: ignore
@validate_required_str_setter
def endpoint(self, value: str):
self._endpoint = value
@property
def hostname(self):
"""
The host name of the application server. This value is automatically
detected for Heroku applications and included in event device metadata.
"""
return self._hostname
@hostname.setter # type: ignore
@validate_str_setter
def hostname(self, value: str):
self._hostname = value
@property
def ignore_classes(self):
"""
Fully qualified class names which should be ignored when capturing
uncaught exceptions and other events. KeyboardInterrupt and Http404
exceptions are ignored by default.
"""
return self._ignore_classes
@ignore_classes.setter # type: ignore
@validate_iterable_setter
def ignore_classes(self, value: Union[List[str], Tuple[str]]):
self._ignore_classes = value
@property
def lib_root(self):
"""
The path to the Python library. Any traceback frame which contains
lib_root as a prefix is considered out-of-project. The prefix is also
stripped to make file names easier to read.
"""
return self._lib_root
@lib_root.setter # type: ignore
@validate_str_setter
def lib_root(self, value: str):
self._lib_root = value
@property
def notify_release_stages(self):
"""
A list of release_stage values which are permitted to capture and send
events and sessions. By default this value is None and all events and
sessions are delivered.
"""
return self._notify_release_stages
@notify_release_stages.setter # type: ignore
@validate_iterable_setter
def notify_release_stages(self, value: List[str]):
self._notify_release_stages = value
@property
def params_filters(self):
"""
A list of filters applied to event metadata to prevent the values from
being sent in events. By default the following keys are filtered:
* authorization
* cookie
* password
* password_confirmation
"""
return self._params_filters
@params_filters.setter # type: ignore
@validate_iterable_setter
def params_filters(self, value: List[str]):
self._params_filters = value
@property
def project_root(self):
"""
The working directory containing the application source code.
Traceback file paths which contain this prefix are considered a part of
the project. This prefix is also stripped to increase file name
readability in traceback lines.
"""
return self._project_root
@project_root.setter # type: ignore
@validate_str_setter
def project_root(self, value: str):
self._project_root = value
@property
def proxy_host(self):
"""
The host name of the proxy to use to deliver requests, if any
"""
return self._proxy_host
@proxy_host.setter # type: ignore
@validate_str_setter
def proxy_host(self, value: str):
self._proxy_host = value
@property
def release_stage(self):
"""
The development phase of the deployed application. This value is used
to differentiate events which occur in production vs development or
staging environments.
"""
return self._release_stage
@release_stage.setter # type: ignore
@validate_str_setter
def release_stage(self, value: str):
self._release_stage = value
@property
def send_code(self):
"""
If the source code lines immediately surrounding traceback locations
should be sent with events
"""
return self._send_code
@send_code.setter # type: ignore
@validate_bool_setter
def send_code(self, value: bool):
self._send_code = value
@property
def send_environment(self):
"""
If the request environment should be automatically collected and
attached to events
"""
return self._send_environment
@send_environment.setter # type: ignore
@validate_bool_setter
def send_environment(self, value: bool):
self._send_environment = value
@property
def session_endpoint(self):
"""
Sessions API endpoint. Set this property if using Bugsnag On-Premise.
>>> config = Configuration()
>>> config.session_endpoint = 'https://sessions.bugsnag.example.co'
"""
return self._session_endpoint
@session_endpoint.setter # type: ignore
@validate_required_str_setter
def session_endpoint(self, value: str):
self._session_endpoint = value
@property
def traceback_exclude_modules(self):
"""
Modules which should be stripped from event tracebacks entirely
"""
return self._traceback_exclude_modules
@traceback_exclude_modules.setter # type: ignore
@validate_iterable_setter
def traceback_exclude_modules(self, value: List[str]):
self._traceback_exclude_modules = value
def should_notify(self) -> bool:
return self.notify_release_stages is None or \
(isinstance(self.notify_release_stages, (tuple, list)) and
self.release_stage in self.notify_release_stages)
def should_ignore(self, exception: BaseException) -> bool:
return self.ignore_classes is not None and \
fully_qualified_class_name(exception) in self.ignore_classes
class RequestConfiguration:
"""
Per-request Bugsnag configuration settings.
"""
@classmethod
def get_instance(cls):
"""
Get this thread's instance of the RequestConfiguration.
"""
try:
instance = _request_info.get()
except LookupError:
instance = None
if instance is None:
instance = RequestConfiguration()
_request_info.set(instance) # type: ignore
return instance
@classmethod
def clear(cls):
"""
Clear this thread's instance of the RequestConfiguration.
"""
_request_info.set(None)
def __init__(self):
self.context = None
self.grouping_hash = None
self.user = {}
self.metadata = {}
# legacy fields
self.user_id = None
self.extra_data = {}
self.request_data = {}
self.environment_data = {}
self.session_data = {}
def get(self, name) -> Any:
"""
Get a single configuration option
"""
return getattr(self, name)
def configure(self, **options):
"""
Set one or more configuration settings.
"""
for name, value in options.items():
setattr(self, name, value)
return self
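    # Illustrative per-request usage (field values are placeholders):
    #
    #   req_config = RequestConfiguration.get_instance()
    #   req_config.configure(user={'id': '123'}, context='/checkout')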
@property
def meta_data(self) -> Any:
warnings.warn('RequestConfiguration.meta_data has been renamed to ' +
'"metadata"', DeprecationWarning)
return self.metadata
| [((19, 20, 19, 63), 'contextvars.ContextVar', 'ContextVar', (), '', False, 'from contextvars import ContextVar\n'), ((22, 20, 22, 69), 'bugsnag.utils.ThreadContextVar', 'ThreadContextVar', (), '', False, 'from bugsnag.utils import ThreadContextVar\n'), ((33, 23, 33, 62), 'os.environ.get', 'os.environ.get', ({(33, 38, 33, 55): '"""BUGSNAG_API_KEY"""', (33, 57, 33, 61): 'None'}, {}), "('BUGSNAG_API_KEY', None)", False, 'import os\n'), ((34, 29, 35, 57), 'os.environ.get', 'os.environ.get', ({(34, 44, 34, 67): '"""BUGSNAG_RELEASE_STAGE"""', (35, 44, 35, 56): '"""production"""'}, {}), "('BUGSNAG_RELEASE_STAGE', 'production')", False, 'import os\n'), ((41, 24, 41, 49), 'bugsnag.delivery.create_default_delivery', 'create_default_delivery', ({}, {}), '()', False, 'from bugsnag.delivery import create_default_delivery, DEFAULT_ENDPOINT, DEFAULT_SESSIONS_ENDPOINT\n'), ((42, 24, 42, 53), 'sysconfig.get_path', 'sysconfig.get_path', ({(42, 43, 42, 52): '"""purelib"""'}, {}), "('purelib')", False, 'import sysconfig\n'), ((43, 28, 43, 39), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((58, 26, 58, 43), 'bugsnag.middleware.MiddlewareStack', 'MiddlewareStack', ({}, {}), '()', False, 'from bugsnag.middleware import DefaultMiddleware, MiddlewareStack\n'), ((60, 35, 60, 52), 'bugsnag.middleware.MiddlewareStack', 'MiddlewareStack', ({}, {}), '()', False, 'from bugsnag.middleware import DefaultMiddleware, MiddlewareStack\n'), ((131, 8, 133, 41), 'warnings.warn', 'warnings.warn', ({(131, 22, 132, 78): "('Using get() to retrieve a Configuration property is ' +\n 'deprecated in favor of referencing properties directly')", (133, 22, 133, 40): 'DeprecationWarning'}, {}), "('Using get() to retrieve a Configuration property is ' +\n 'deprecated in favor of referencing properties directly',\n DeprecationWarning)", False, 'import warnings\n'), ((479, 8, 480, 55), 'warnings.warn', 'warnings.warn', ({(479, 22, 480, 34): '(\'RequestConfiguration.meta_data has been renamed to \' + \'"metadata"\')', (480, 36, 480, 54): 'DeprecationWarning'}, {}), '(\'RequestConfiguration.meta_data has been renamed to \' +\n \'"metadata"\', DeprecationWarning)', False, 'import warnings\n'), ((66, 15, 66, 32), 'os.getenv', 'os.getenv', ({(66, 25, 66, 31): '"""DYNO"""'}, {}), "('DYNO')", False, 'import os\n'), ((67, 28, 67, 48), 'socket.gethostname', 'socket.gethostname', ({}, {}), '()', False, 'import socket\n'), ((71, 43, 71, 68), 'platform.python_version', 'platform.python_version', ({}, {}), '()', False, 'import platform\n'), ((184, 12, 184, 51), 'bugsnag.uwsgi.warn_if_running_uwsgi_without_threads', 'warn_if_running_uwsgi_without_threads', ({}, {}), '()', False, 'from bugsnag.uwsgi import warn_if_running_uwsgi_without_threads\n'), ((417, 12, 417, 49), 'bugsnag.utils.fully_qualified_class_name', 'fully_qualified_class_name', ({(417, 39, 417, 48): 'exception'}, {}), '(exception)', False, 'from bugsnag.utils import fully_qualified_class_name, validate_str_setter, validate_bool_setter, validate_iterable_setter, validate_required_str_setter\n')] |
failk8s/failk8s-operator | secret_injector/secret.py | 457890a09a2551b9002eec73386b11a37469569f | import kopf
from .functions import global_logger, reconcile_secret
@kopf.on.event("", "v1", "secrets")
def injector_secret_event(type, event, logger, **_):
obj = event["object"]
namespace = obj["metadata"]["namespace"]
name = obj["metadata"]["name"]
    # If the secret already exists (indicated by type being None), or the
    # secret is added or modified later, do a full reconciliation to ensure
    # that, if the secret now matches, it will be injected.
with global_logger(logger):
if type in (None, "ADDED", "MODIFIED"):
reconcile_secret(name, namespace, obj)
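# Illustrative way to exercise this handler locally with the kopf CLI
# (assumes a reachable cluster and the sibling .functions module on the path):
#   kopf run secret.py --verbose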
| [((6, 1, 6, 35), 'kopf.on.event', 'kopf.on.event', ({(6, 15, 6, 17): '""""""', (6, 19, 6, 23): '"""v1"""', (6, 25, 6, 34): '"""secrets"""'}, {}), "('', 'v1', 'secrets')", False, 'import kopf\n')] |
openforis/collectearthonline | src/py/gee/utils.py | 1af48e373c393a1d8c48b17472f6aa6c41f65769 | import datetime
import os
import ee
import math
import sys
import json
from ee.ee_exception import EEException
from gee.inputs import getLandsat, getS1
########## Helper functions ##########
def initialize(ee_account='', ee_key_path=''):
try:
if ee_account and ee_key_path and os.path.exists(ee_key_path):
credentials = ee.ServiceAccountCredentials(ee_account, ee_key_path)
ee.Initialize(credentials)
else:
ee.Initialize()
except Exception as e:
print(e)
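# Illustrative call (the service account and key path are placeholders):
#   initialize('sa-name@project.iam.gserviceaccount.com', '/path/to/key.json')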
def getReducer(reducer):
reducerName = reducer.lower()
if(reducerName == 'min'):
return ee.Reducer.min()
elif (reducerName == 'max'):
return ee.Reducer.max()
elif (reducerName == 'mean'):
return ee.Reducer.mean()
elif (reducerName == 'mode'):
return ee.Reducer.mode()
elif (reducerName == 'first'):
return ee.Reducer.first()
elif (reducerName == 'last'):
return ee.Reducer.last()
elif (reducerName == 'sum'):
return ee.Reducer.sum()
else:
return ee.Reducer.median()
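# e.g. getReducer('Mean') returns ee.Reducer.mean(); any unrecognised name
# falls back to ee.Reducer.median().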
def reduceIC(imageCollection, reducer):
reducerName = reducer.lower()
if(reducerName == 'min'):
return imageCollection.min()
elif (reducerName == 'max'):
return imageCollection.max()
elif (reducerName == 'mean'):
return imageCollection.mean()
elif (reducerName == 'mode'):
return imageCollection.mode()
elif (reducerName == 'mosaic'):
return imageCollection.mosaic()
elif (reducerName == 'first'):
return imageCollection.first()
elif (reducerName == 'sum'):
return imageCollection.sum()
else:
return imageCollection.median()
def safeParseJSON(val):
if isinstance(val, dict):
return val
else:
try:
return json.loads(val)
except Exception as e:
try:
return json.loads(val.replace("'", "\""))
except Exception as e:
return {}
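# e.g. safeParseJSON('{"a": 1}') and safeParseJSON("{'a': 1}") both return
# {'a': 1}; anything unparseable returns {}.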
########## Helper routes ##########
def listAvailableBands(name, assetType):
eeImage = None
if assetType == "imageCollection":
eeImage = ee.ImageCollection(name).first()
else:
eeImage = ee.Image(name)
return {
'bands': eeImage.bandNames().getInfo(),
'imageName': name
}
########## ee.Image ##########
def imageToMapId(image, visParams):
eeImage = ee.Image(image)
mapId = eeImage.getMapId(visParams)
    # TODO, just return URL so the routes are easier to deduce what's being returned.
return {
'url': mapId['tile_fetcher'].url_format
}
########## ee.ImageCollection ##########
def imageCollectionToMapId(assetId, visParams, reducer, startDate, endDate):
eeCollection = ee.ImageCollection(assetId)
if (startDate and endDate):
eeFilterDate = ee.Filter.date(startDate, endDate)
eeCollection = eeCollection.filter(eeFilterDate)
reducedImage = ee.Image(reduceIC(eeCollection, reducer))
return imageToMapId(reducedImage, visParams)
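# Illustrative call (the asset id is one used elsewhere in this module; the
# vis params shown are an assumption):
#   imageCollectionToMapId('LANDSAT/LC08/C01/T1_TOA',
#                          {'bands': 'B4,B3,B2', 'min': 0, 'max': 0.3},
#                          'median', '2020-01-01', '2020-12-31')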
# TODO, should we allow user to select first cloud free image again?
def firstCloudFreeImageInMosaicToMapId(assetId, visParams, startDate, endDate):
skipCloudMask = False
eeCollection = ee.ImageCollection(assetId)
lowerAsset = assetId.lower()
if("b2" not in visParams["bands"].lower()):
skipCloudMask = True
elif ("lc8" in lowerAsset):
skipCloudMask = False
elif ("le7" in lowerAsset):
skipCloudMask = False
elif ("lt5" in lowerAsset):
skipCloudMask = False
else:
skipCloudMask = True
if (startDate and endDate):
eeFilterDate = ee.Filter.date(startDate, endDate)
eeCollection = eeCollection.filter(eeFilterDate)
eeFirstImage = ee.Image(eeCollection.mosaic())
try:
if(skipCloudMask == False):
sID = ''
if ("lc8" in lowerAsset):
sID = 'OLI_TIRS'
elif ("le7" in lowerAsset):
sID = 'ETM'
elif ("lt5" in lowerAsset):
sID = 'TM'
scored = ee.Algorithms.Landsat.simpleCloudScore(
eeFirstImage.set('SENSOR_ID', sID))
mask = scored.select(['cloud']).lte(20)
masked = eeFirstImage.updateMask(mask)
values = imageToMapId(masked, visParams)
else:
values = imageToMapId(eeFirstImage, visParams)
    except EEException:
        # Fall back without the cloud mask and keep 'values' bound for the
        # return below.
        values = imageToMapId(eeFirstImage, visParams)
return values
########## ee.FeatureCollection ##########
def getFeatureCollectionTileUrl(featureCollection, field, matchID, visParams):
fc = ee.FeatureCollection(featureCollection)
single = fc.filter(ee.Filter.equals(field, matchID))
mapId = ee.Image().paint(single, 0, 2).getMapId(visParams)
return mapId['tile_fetcher'].url_format
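# Illustrative call (asset path, field name and id are placeholders):
#   getFeatureCollectionTileUrl('users/someone/plots', 'PLOT_ID', 42,
#                               {'palette': 'FF0000'})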
########## Pre defined ee.ImageCollection ##########
# Index Image Collection
def lsMaskClouds(img, cloudThresh=10):
score = ee.Image(1.0)
# Clouds are reasonably bright in the blue band.
blue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide(
ee.Number(0.3).subtract(ee.Number(0.1)))
score = score.min(blue_rescale)
# Clouds are reasonably bright in all visible bands.
visible = img.select('red').add(
img.select('green')).add(img.select('blue'))
visible_rescale = visible.subtract(ee.Number(0.2)).divide(
ee.Number(0.8).subtract(ee.Number(0.2)))
score = score.min(visible_rescale)
# Clouds are reasonably bright in all infrared bands.
infrared = img.select('nir').add(
img.select('swir1')).add(img.select('swir2'))
infrared_rescale = infrared.subtract(ee.Number(0.3)).divide(
ee.Number(0.8).subtract(ee.Number(0.3)))
score = score.min(infrared_rescale)
# Clouds are reasonably cool in temperature.
temp_rescale = img.select('temp').subtract(ee.Number(300)).divide(
ee.Number(290).subtract(ee.Number(300)))
score = score.min(temp_rescale)
# However, clouds are not snow.
ndsi = img.normalizedDifference(['green', 'swir1'])
ndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide(
ee.Number(0.6).subtract(ee.Number(0.8)))
score = score.min(ndsi_rescale).multiply(100).byte()
mask = score.lt(cloudThresh).rename(['cloudMask'])
img = img.updateMask(mask)
return img.addBands(score)
def s2MaskClouds(img):
qa = img.select('QA60')
# Bits 10 and 11 are clouds and cirrus, respectively.
cloudBitMask = int(math.pow(2, 10))
cirrusBitMask = int(math.pow(2, 11))
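    # i.e. cloudBitMask = 1 << 10 = 1024 and cirrusBitMask = 1 << 11 = 2048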
# clear if both flags set to zero.
clear = qa.bitwiseAnd(cloudBitMask).eq(0).And(
qa.bitwiseAnd(cirrusBitMask).eq(0))
return img.divide(10000).updateMask(clear).set('system:time_start', img.get('system:time_start'))
def bandPassAdjustment(img):
keep = img.select(['temp'])
bands = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2']
# linear regression coefficients for adjustment
gain = ee.Array([[0.977], [1.005], [0.982], [1.001], [1.001], [0.996]])
bias = ee.Array([[-0.00411], [-0.00093], [0.00094],
[-0.00029], [-0.00015], [-0.00097]])
# Make an Array Image, with a 2-D Array per pixel.
arrayImage2D = img.select(bands).toArray().toArray(1)
# apply correction factors and reproject array to geographic image
componentsImage = ee.Image(gain).multiply(arrayImage2D).add(ee.Image(bias)) \
.arrayProject([0]).arrayFlatten([bands]).float()
# .set('system:time_start',img.get('system:time_start'));
return keep.addBands(componentsImage)
def getLandSatMergedCollection():
sensorBandDictLandsatTOA = {'L8': [1, 2, 3, 4, 5, 9, 6],
'L7': [0, 1, 2, 3, 4, 5, 7],
'L5': [0, 1, 2, 3, 4, 5, 6],
'L4': [0, 1, 2, 3, 4, 5, 6],
'S2': [1, 2, 3, 7, 11, 10, 12]}
bandNamesLandsatTOA = ['blue', 'green',
'red', 'nir', 'swir1', 'temp', 'swir2']
metadataCloudCoverMax = 100
lt4 = ee.ImageCollection('LANDSAT/LT4_L1T_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L4'], bandNamesLandsatTOA).map(lsMaskClouds)
lt5 = ee.ImageCollection('LANDSAT/LT5_L1T_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L5'], bandNamesLandsatTOA).map(lsMaskClouds)
le7 = ee.ImageCollection('LANDSAT/LE7_L1T_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L7'], bandNamesLandsatTOA).map(lsMaskClouds)
lc8 = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L8'], bandNamesLandsatTOA).map(lsMaskClouds)
s2 = ee.ImageCollection('COPERNICUS/S2') \
.filterMetadata('CLOUDY_PIXEL_PERCENTAGE', 'less_than', metadataCloudCoverMax) \
.map(s2MaskClouds).select(sensorBandDictLandsatTOA['S2'], bandNamesLandsatTOA) \
.map(bandPassAdjustment)
return ee.ImageCollection(lt4.merge(lt5).merge(le7).merge(lc8).merge(s2))
def filteredImageNDVIToMapId(startDate, endDate):
def calcNDVI(img):
return img.expression('(i.nir - i.red) / (i.nir + i.red)', {'i': img}).rename(['NDVI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = 'c9c0bf,435ebf,eee8aa,006400'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcNDVI).mean())
return imageToMapId(eviImage, visParams)
def filteredImageEVIToMapId(startDate, endDate):
def calcEVI(img):
return img.expression('2.5 * (i.nir - i.red) / (i.nir + 6.0 * i.red - 7.5 * i.blue + 1)', {'i': img}).rename(['EVI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = 'F5F5F5,E6D3C5,C48472,B9CF63,94BF3D,6BB037,42A333,00942C,008729,007824,004A16'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcEVI).mean())
return imageToMapId(eviImage, visParams)
def filteredImageEVI2ToMapId(startDate, endDate):
def calcEVI2(img):
return img.expression('2.5 * (i.nir - i.red) / (i.nir + 2.4 * i.red + 1)', {'i': img}).rename(['EVI2']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = 'F5F5F5,E6D3C5,C48472,B9CF63,94BF3D,6BB037,42A333,00942C,008729,007824,004A16'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcEVI2).mean())
return imageToMapId(eviImage, visParams)
def filteredImageNDMIToMapId(startDate, endDate):
def calcNDMI(img):
return img.expression('(i.nir - i.swir1) / (i.nir + i.swir1)', {'i': img}).rename(['NDMI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = '0000FE,2E60FD,31B0FD,00FEFE,50FE00,DBFE66,FEFE00,FFBB00,FF6F00,FE0000'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcNDMI).mean())
return imageToMapId(eviImage, visParams)
def filteredImageNDWIToMapId(startDate, endDate):
def calcNDWI(img):
return img.expression('(i.green - i.nir) / (i.green + i.nir)', {'i': img}).rename(['NDWI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = '505050,E8E8E8,00FF33,003300'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcNDWI).mean())
return imageToMapId(eviImage, visParams)
def filteredImageByIndexToMapId(startDate, endDate, index):
lowerIndex = index.lower()
if (lowerIndex == 'ndvi'):
return filteredImageNDVIToMapId(startDate, endDate)
elif (lowerIndex == 'evi'):
return filteredImageEVIToMapId(startDate, endDate)
elif (lowerIndex == 'evi2'):
return filteredImageEVI2ToMapId(startDate, endDate)
elif (lowerIndex == 'ndmi'):
return filteredImageNDMIToMapId(startDate, endDate)
elif (lowerIndex == 'ndwi'):
return filteredImageNDWIToMapId(startDate, endDate)
def filteredImageCompositeToMapId(assetId, visParams, startDate, endDate, metadataCloudCoverMax, simpleCompositeVariable):
eeCollection = ee.ImageCollection(assetId)
if (startDate and endDate):
eeCollection = eeCollection.filterDate(startDate, endDate)
    # filterMetadata returns a new collection, so assign the result back.
    eeCollection = eeCollection.filterMetadata(
        'CLOUD_COVER',
        'less_than',
        metadataCloudCoverMax
    )
eeMosaicImage = ee.Algorithms.Landsat.simpleComposite(
eeCollection,
simpleCompositeVariable,
10,
40,
True
)
return imageToMapId(eeMosaicImage, visParams)
def filteredSentinelComposite(visParams, startDate, endDate, metadataCloudCoverMax):
def cloudScore(img):
def rescale(img, exp, thresholds):
return img.expression(exp, {'img': img}).subtract(thresholds[0]).divide(thresholds[1] - thresholds[0])
score = ee.Image(1.0)
score = score.min(rescale(img, 'img.B2', [0.1, 0.3]))
score = score.min(rescale(img, 'img.B4 + img.B3 + img.B2', [0.2, 0.8]))
score = score.min(
rescale(img, 'img.B8 + img.B11 + img.B12', [0.3, 0.8]))
ndsi = img.normalizedDifference(['B3', 'B11'])
return score.min(rescale(ndsi, 'img', [0.8, 0.6]))
def cloudScoreS2(img):
rescale = img.divide(10000)
score = cloudScore(rescale).multiply(100).rename('cloudscore')
return img.addBands(score)
sentinel2 = ee.ImageCollection('COPERNICUS/S2')
f2017s2 = sentinel2.filterDate(startDate, endDate).filterMetadata(
'CLOUDY_PIXEL_PERCENTAGE', 'less_than', metadataCloudCoverMax)
m2017s2 = f2017s2.map(cloudScoreS2)
m2017s3 = m2017s2.median()
return imageToMapId(m2017s3, visParams)
def filteredSentinelSARComposite(visParams, startDate, endDate):
def toNatural(img):
return ee.Image(10).pow(img.divide(10))
def addRatioBands(img):
# not using angle band
vv = img.select('VV')
vh = img.select('VH')
vv_vh = vv.divide(vh).rename('VV/VH')
vh_vv = vh.divide(vv).rename('VH/VV')
return vv.addBands(vh).addBands(vv_vh).addBands(vh_vv)
sentinel1 = ee.ImageCollection('COPERNICUS/S1_GRD')
sentinel1 = sentinel1.filterDate(startDate, endDate) \
.filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VV')) \
.filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VH')) \
.filter(ee.Filter.eq('instrumentMode', 'IW'))
sentinel1 = sentinel1.map(toNatural)
sentinel1 = sentinel1.map(addRatioBands)
median = sentinel1.median()
return imageToMapId(median, visParams)
########## Time Series ##########
def getTimeSeriesByCollectionAndIndex(assetId, indexName, scale, coords, startDate, endDate, reducer):
geometry = None
indexCollection = None
if isinstance(coords[0], list):
geometry = ee.Geometry.Polygon(coords)
else:
geometry = ee.Geometry.Point(coords)
if indexName != None:
indexCollection = ee.ImageCollection(assetId).filterDate(
startDate, endDate).select(indexName)
else:
indexCollection = ee.ImageCollection(
assetId).filterDate(startDate, endDate)
def getIndex(image):
theReducer = getReducer(reducer)
if indexName != None:
indexValue = image.reduceRegion(
theReducer, geometry, scale).get(indexName)
else:
indexValue = image.reduceRegion(theReducer, geometry, scale)
date = image.get('system:time_start')
indexImage = ee.Image().set(
'indexValue', [ee.Number(date), indexValue])
return indexImage
def getClipped(image):
return image.clip(geometry)
clippedcollection = indexCollection.map(getClipped)
indexCollection1 = clippedcollection.map(getIndex)
indexCollection2 = indexCollection1.aggregate_array('indexValue')
return indexCollection2.getInfo()
def getTimeSeriesByIndex(indexName, scale, coords, startDate, endDate, reducer):
bandsByCollection = {
'LANDSAT/LC08/C01/T1_TOA': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'],
'LANDSAT/LC08/C01/T2_TOA': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'],
'LANDSAT/LE07/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LE07/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT05/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT05/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT04/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT04/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7']
}
indexes = {
'NDVI': '(nir - red) / (nir + red)',
'EVI': '2.5 * (nir - red) / (nir + 6.0 * red - 7.5 * blue + 1)',
'EVI2': '2.5 * (nir - red) / (nir + 2.4 * red + 1)',
'NDMI': '(nir - swir1) / (nir + swir1)',
'NDWI': '(green - nir) / (green + nir)',
'NBR': '(nir - swir2) / (nir + swir2)',
'LSAVI': '((nir - red) / (nir + red + 0.5)) * (1 + 0.5)'
}
def create(name):
def maskClouds(image):
def isSet(types):
""" https://landsat.usgs.gov/collectionqualityband """
typeByValue = {
'badPixels': 15,
'cloud': 16,
'shadow': 256,
'snow': 1024,
'cirrus': 4096
}
anySet = ee.Image(0)
for Type in types:
anySet = anySet.Or(image.select(
'BQA').bitwiseAnd(typeByValue[Type]).neq(0))
return anySet
return image.updateMask(isSet(['badPixels', 'cloud', 'shadow', 'cirrus']).Not())
def toIndex(image):
bands = bandsByCollection[name]
return image.expression(indexes[indexName], {
'blue': image.select(bands[0]),
'green': image.select(bands[1]),
'red': image.select(bands[2]),
'nir': image.select(bands[3]),
'swir1': image.select(bands[4]),
'swir2': image.select(bands[5]),
}).clamp(-1, 1).rename(['index'])
def toIndexWithTimeStart(image):
time = image.get('system:time_start')
image = maskClouds(image)
return toIndex(image).set('system:time_start', time)
#
if startDate and endDate:
return ee.ImageCollection(name).filterDate(startDate, endDate).filterBounds(geometry).map(toIndexWithTimeStart, True)
else:
return ee.ImageCollection(name).filterBounds(geometry).map(toIndexWithTimeStart, True)
def reduceRegion(image):
theReducer = getReducer(reducer)
reduced = image.reduceRegion(
theReducer, geometry=geometry, scale=scale, maxPixels=1e6)
return ee.Feature(None, {
'index': reduced.get('index'),
'timeIndex': [image.get('system:time_start'), reduced.get('index')]
})
geometry = None
if isinstance(coords[0], list) or isinstance(coords[0], tuple):
geometry = ee.Geometry.Polygon(coords)
else:
geometry = ee.Geometry.Point(coords)
collection = ee.ImageCollection([])
for name in bandsByCollection:
collection = collection.merge(create(name))
return ee.ImageCollection(ee.ImageCollection(collection).sort('system:time_start').distinct('system:time_start')) \
.map(reduceRegion) \
.filterMetadata('index', 'not_equals', None) \
.aggregate_array('timeIndex') \
.getInfo()
########## Degradation ##########
def getDegradationTileUrlByDateS1(geometry, date, visParams):
imDate = datetime.datetime.strptime(date, "%Y-%m-%d")
befDate = imDate - datetime.timedelta(days=1)
aftDate = imDate + datetime.timedelta(days=1)
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
sentinel1Data = getS1({
"targetBands": ['VV', 'VH', 'VV/VH'],
'region': geometry})
start = befDate.strftime('%Y-%m-%d')
end = aftDate.strftime('%Y-%m-%d')
selectedImage = sentinel1Data.filterDate(start, end).first()
selectedImage = ee.Image(selectedImage)
mapparams = selectedImage.getMapId(visParams)
return mapparams['tile_fetcher'].url_format
def getDegradationPlotsByPointS1(geometry, start, end):
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
sentinel1Data = getS1({
"targetBands": ['VV', 'VH', 'VV/VH'],
'region': geometry
}).filterDate(start, end)
def myimageMapper(img):
theReducer = ee.Reducer.mean()
indexValue = img.reduceRegion(theReducer, geometry, 30)
date = img.get('system:time_start')
visParams = {'bands': ['VV', 'VH', 'ratioVVVH'],
'min': [-15, -25, .40], 'max': [0, -10, 1], 'gamma': 1.6}
indexImage = ee.Image().set(
'indexValue', [ee.Number(date), indexValue])
return indexImage
lsd = sentinel1Data.map(myimageMapper, True)
indexCollection2 = lsd.aggregate_array('indexValue')
values = indexCollection2.getInfo()
return values
def getDegradationTileUrlByDate(geometry, date, visParams):
imDate = datetime.datetime.strptime(date, "%Y-%m-%d")
startDate = imDate - datetime.timedelta(days=1)
endDate = imDate + datetime.timedelta(days=1)
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
landsatData = getLandsat({
"start": startDate.strftime('%Y-%m-%d'),
"end": endDate.strftime('%Y-%m-%d'),
"targetBands": ['RED', 'GREEN', 'BLUE', 'SWIR1', 'NIR'],
"region": geometry,
"sensors": {"l4": False, "l5": False, "l7": False, "l8": True}
})
selectedImage = landsatData.first()
unmasked = ee.Image(selectedImage).multiply(10000).toInt16().unmask()
mapparams = unmasked.getMapId(visParams)
return mapparams['tile_fetcher'].url_format
def getDegradationPlotsByPoint(geometry, start, end, band):
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
landsatData = getLandsat({
"start": start,
"end": end,
"targetBands": [band],
"region": geometry,
"sensors": {"l4": True, "l5": True, "l7": True, "l8": True}
})
def myImageMapper(img):
theReducer = ee.Reducer.mean()
indexValue = img.reduceRegion(theReducer, geometry, 30)
date = img.get('system:time_start')
indexImage = ee.Image().set(
'indexValue',
[ee.Number(date), indexValue]
)
return indexImage
lsd = landsatData.map(myImageMapper, True)
indexCollection2 = lsd.aggregate_array('indexValue')
values = indexCollection2.getInfo()
return values
########## Stats ##########
def getStatistics(extent):
extentGeom = ee.Geometry.Polygon(extent)
elev = ee.Image('USGS/GTOPO30')
minmaxElev = elev.reduceRegion(
ee.Reducer.minMax(), extentGeom, 1000, maxPixels=500000000)
minElev = minmaxElev.get('elevation_min').getInfo()
maxElev = minmaxElev.get('elevation_max').getInfo()
ciesinPopGrid = ee.Image('CIESIN/GPWv4/population-count/2020')
popDict = ciesinPopGrid.reduceRegion(
ee.Reducer.sum(), extentGeom, maxPixels=500000000)
pop = popDict.get('population-count').getInfo()
pop = int(pop)
return {
'minElev': minElev,
'maxElev': maxElev,
'pop': pop
}
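# Illustrative call (coordinates are placeholder lon/lat pairs forming a
# polygon ring):
#   getStatistics([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]])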
| [((96, 14, 96, 29), 'ee.Image', 'ee.Image', ({(96, 23, 96, 28): 'image'}, {}), '(image)', False, 'import ee\n'), ((107, 19, 107, 46), 'ee.ImageCollection', 'ee.ImageCollection', ({(107, 38, 107, 45): 'assetId'}, {}), '(assetId)', False, 'import ee\n'), ((120, 19, 120, 46), 'ee.ImageCollection', 'ee.ImageCollection', ({(120, 38, 120, 45): 'assetId'}, {}), '(assetId)', False, 'import ee\n'), ((160, 9, 160, 48), 'ee.FeatureCollection', 'ee.FeatureCollection', ({(160, 30, 160, 47): 'featureCollection'}, {}), '(featureCollection)', False, 'import ee\n'), ((171, 12, 171, 25), 'ee.Image', 'ee.Image', ({(171, 21, 171, 24): '1.0'}, {}), '(1.0)', False, 'import ee\n'), ((224, 11, 224, 75), 'ee.Array', 'ee.Array', ({(224, 20, 224, 74): '[[0.977], [1.005], [0.982], [1.001], [1.001], [0.996]]'}, {}), '([[0.977], [1.005], [0.982], [1.001], [1.001], [0.996]])', False, 'import ee\n'), ((225, 11, 226, 56), 'ee.Array', 'ee.Array', ({(225, 20, 226, 55): '[[-0.00411], [-0.00093], [0.00094], [-0.00029], [-0.00015], [-0.00097]]'}, {}), '([[-0.00411], [-0.00093], [0.00094], [-0.00029], [-0.00015], [-\n 0.00097]])', False, 'import ee\n'), ((346, 19, 346, 46), 'ee.ImageCollection', 'ee.ImageCollection', ({(346, 38, 346, 45): 'assetId'}, {}), '(assetId)', False, 'import ee\n'), ((354, 20, 360, 5), 'ee.Algorithms.Landsat.simpleComposite', 'ee.Algorithms.Landsat.simpleComposite', ({(355, 8, 355, 20): 'eeCollection', (356, 8, 356, 31): 'simpleCompositeVariable', (357, 8, 357, 10): '10', (358, 8, 358, 10): '40', (359, 8, 359, 12): 'True'}, {}), '(eeCollection, simpleCompositeVariable,\n 10, 40, True)', False, 'import ee\n'), ((382, 16, 382, 51), 'ee.ImageCollection', 'ee.ImageCollection', ({(382, 35, 382, 50): '"""COPERNICUS/S2"""'}, {}), "('COPERNICUS/S2')", False, 'import ee\n'), ((402, 16, 402, 55), 'ee.ImageCollection', 'ee.ImageCollection', ({(402, 35, 402, 54): '"""COPERNICUS/S1_GRD"""'}, {}), "('COPERNICUS/S1_GRD')", False, 'import ee\n'), ((525, 17, 525, 39), 'ee.ImageCollection', 'ee.ImageCollection', ({(525, 36, 525, 38): '[]'}, {}), '([])', False, 'import ee\n'), ((538, 13, 538, 57), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(538, 40, 538, 44): 'date', (538, 46, 538, 56): '"""%Y-%m-%d"""'}, {}), "(date, '%Y-%m-%d')", False, 'import datetime\n'), ((547, 20, 549, 28), 'gee.inputs.getS1', 'getS1', ({(547, 26, 549, 27): "{'targetBands': ['VV', 'VH', 'VV/VH'], 'region': geometry}"}, {}), "({'targetBands': ['VV', 'VH', 'VV/VH'], 'region': geometry})", False, 'from gee.inputs import getLandsat, getS1\n'), ((556, 20, 556, 43), 'ee.Image', 'ee.Image', ({(556, 29, 556, 42): 'selectedImage'}, {}), '(selectedImage)', False, 'import ee\n'), ((588, 13, 588, 57), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(588, 40, 588, 44): 'date', (588, 46, 588, 56): '"""%Y-%m-%d"""'}, {}), "(date, '%Y-%m-%d')", False, 'import datetime\n'), ((614, 18, 620, 6), 'gee.inputs.getLandsat', 'getLandsat', ({(614, 29, 620, 5): "{'start': start, 'end': end, 'targetBands': [band], 'region': geometry,\n 'sensors': {'l4': True, 'l5': True, 'l7': True, 'l8': True}}"}, {}), "({'start': start, 'end': end, 'targetBands': [band], 'region':\n geometry, 'sensors': {'l4': True, 'l5': True, 'l7': True, 'l8': True}})", False, 'from gee.inputs import getLandsat, getS1\n'), ((640, 17, 640, 44), 'ee.Geometry.Polygon', 'ee.Geometry.Polygon', ({(640, 37, 640, 43): 'extent'}, {}), '(extent)', False, 'import ee\n'), ((641, 11, 641, 35), 'ee.Image', 'ee.Image', ({(641, 20, 641, 34): '"""USGS/GTOPO30"""'}, {}), "('USGS/GTOPO30')", False, 'import ee\n'), ((646, 20, 646, 66), 'ee.Image', 'ee.Image', ({(646, 29, 646, 65): '"""CIESIN/GPWv4/population-count/2020"""'}, {}), "('CIESIN/GPWv4/population-count/2020')", False, 'import ee\n'), ((28, 15, 28, 31), 'ee.Reducer.min', 'ee.Reducer.min', ({}, {}), '()', False, 'import ee\n'), ((86, 18, 86, 32), 'ee.Image', 'ee.Image', ({(86, 27, 86, 31): 'name'}, {}), '(name)', False, 'import ee\n'), ((109, 23, 109, 57), 'ee.Filter.date', 'ee.Filter.date', ({(109, 38, 109, 47): 'startDate', (109, 49, 109, 56): 'endDate'}, {}), '(startDate, endDate)', False, 'import ee\n'), ((133, 23, 133, 57), 'ee.Filter.date', 'ee.Filter.date', ({(133, 38, 133, 47): 'startDate', (133, 49, 133, 56): 'endDate'}, {}), '(startDate, endDate)', False, 'import ee\n'), ((161, 23, 161, 55), 'ee.Filter.equals', 'ee.Filter.equals', ({(161, 40, 161, 45): 'field', (161, 47, 161, 54): 'matchID'}, {}), '(field, matchID)', False, 'import ee\n'), ((210, 23, 210, 38), 'math.pow', 'math.pow', ({(210, 32, 210, 33): '2', (210, 35, 210, 37): '10'}, {}), '(2, 10)', False, 'import math\n'), ((211, 24, 211, 39), 'math.pow', 'math.pow', ({(211, 33, 211, 34): '2', (211, 36, 211, 38): '11'}, {}), '(2, 11)', False, 'import math\n'), ((369, 16, 369, 29), 'ee.Image', 'ee.Image', ({(369, 25, 369, 28): '1.0'}, {}), '(1.0)', False, 'import ee\n'), ((406, 16, 406, 52), 'ee.Filter.eq', 'ee.Filter.eq', ({(406, 29, 406, 45): '"""instrumentMode"""', (406, 47, 406, 51): '"""IW"""'}, {}), "('instrumentMode', 'IW')", False, 'import ee\n'), ((420, 19, 420, 46), 'ee.Geometry.Polygon', 'ee.Geometry.Polygon', ({(420, 39, 420, 45): 'coords'}, {}), '(coords)', False, 'import ee\n'), ((422, 19, 422, 44), 'ee.Geometry.Point', 'ee.Geometry.Point', ({(422, 37, 422, 43): 'coords'}, {}), '(coords)', False, 'import ee\n'), ((522, 19, 522, 46), 'ee.Geometry.Polygon', 'ee.Geometry.Polygon', ({(522, 39, 522, 45): 'coords'}, {}), '(coords)', False, 'import ee\n'), ((524, 19, 524, 44), 'ee.Geometry.Point', 'ee.Geometry.Point', ({(524, 37, 524, 43): 'coords'}, {}), '(coords)', False, 'import ee\n'), ((539, 23, 539, 49), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((540, 23, 540, 49), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((543, 19, 543, 48), 'ee.Geometry.Polygon', 'ee.Geometry.Polygon', ({(543, 39, 543, 47): 'geometry'}, {}), '(geometry)', False, 'import ee\n'), ((545, 19, 545, 46), 'ee.Geometry.Point', 'ee.Geometry.Point', ({(545, 37, 545, 45): 'geometry'}, {}), '(geometry)', False, 'import ee\n'), ((563, 19, 563, 48), 'ee.Geometry.Polygon', 'ee.Geometry.Polygon', ({(563, 39, 563, 47): 'geometry'}, {}), '(geometry)', False, 'import ee\n'), ((565, 19, 565, 46), 'ee.Geometry.Point', 'ee.Geometry.Point', ({(565, 37, 565, 45): 'geometry'}, {}), '(geometry)', False, 'import ee\n'), ((573, 21, 573, 38), 'ee.Reducer.mean', 'ee.Reducer.mean', ({}, {}), '()', False, 'import ee\n'), ((589, 25, 589, 51), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((590, 23, 590, 49), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((592, 19, 592, 48), 'ee.Geometry.Polygon', 'ee.Geometry.Polygon', ({(592, 39, 592, 47): 'geometry'}, {}), '(geometry)', False, 'import ee\n'), ((594, 19, 594, 46), 'ee.Geometry.Point', 'ee.Geometry.Point', ({(594, 37, 594, 45): 'geometry'}, {}), '(geometry)', False, 'import ee\n'), ((611, 19, 611, 48), 'ee.Geometry.Polygon', 'ee.Geometry.Polygon', ({(611, 39, 611, 47): 'geometry'}, {}), '(geometry)', False, 'import ee\n'), ((613, 19, 613, 46), 'ee.Geometry.Point', 'ee.Geometry.Point', ({(613, 37, 613, 45): 'geometry'}, {}), '(geometry)', False, 'import ee\n'), ((623, 21, 623, 38), 'ee.Reducer.mean', 'ee.Reducer.mean', ({}, {}), '()', False, 'import ee\n'), ((643, 8, 643, 27), 'ee.Reducer.minMax', 'ee.Reducer.minMax', ({}, {}), '()', False, 'import ee\n'), ((648, 8, 648, 24), 'ee.Reducer.sum', 'ee.Reducer.sum', ({}, {}), '()', False, 'import ee\n'), ((16, 42, 16, 69), 'os.path.exists', 'os.path.exists', ({(16, 57, 16, 68): 'ee_key_path'}, {}), '(ee_key_path)', False, 'import os\n'), ((17, 26, 17, 79), 'ee.ServiceAccountCredentials', 'ee.ServiceAccountCredentials', ({(17, 55, 17, 65): 'ee_account', (17, 67, 17, 78): 'ee_key_path'}, {}), '(ee_account, ee_key_path)', False, 'import ee\n'), ((18, 12, 18, 38), 'ee.Initialize', 'ee.Initialize', ({(18, 26, 18, 37): 'credentials'}, {}), '(credentials)', False, 'import ee\n'), ((20, 12, 20, 27), 'ee.Initialize', 'ee.Initialize', ({}, {}), '()', False, 'import ee\n'), ((30, 15, 30, 31), 'ee.Reducer.max', 'ee.Reducer.max', ({}, {}), '()', False, 'import ee\n'), ((70, 19, 70, 34), 'json.loads', 'json.loads', ({(70, 30, 70, 33): 'val'}, {}), '(val)', False, 'import json\n'), ((174, 32, 174, 46), 'ee.Number', 'ee.Number', ({(174, 42, 174, 45): '0.1'}, {}), '(0.1)', False, 'import ee\n'), ((181, 32, 181, 46), 'ee.Number', 'ee.Number', ({(181, 42, 181, 45): '0.2'}, {}), '(0.2)', False, 'import ee\n'), ((188, 32, 188, 46), 'ee.Number', 'ee.Number', ({(188, 42, 188, 45): '0.3'}, {}), '(0.3)', False, 'import ee\n'), ((193, 32, 193, 46), 'ee.Number', 'ee.Number', ({(193, 42, 193, 45): '300'}, {}), '(300)', False, 'import ee\n'), ((199, 32, 199, 46), 'ee.Number', 'ee.Number', ({(199, 42, 199, 45): '0.8'}, {}), '(0.8)', False, 'import ee\n'), ((567, 20, 570, 6), 'gee.inputs.getS1', 'getS1', ({(567, 26, 570, 5): "{'targetBands': ['VV', 'VH', 'VV/VH'], 'region': geometry}"}, {}), "({'targetBands': ['VV', 'VH', 'VV/VH'], 'region': geometry})", False, 'from gee.inputs import getLandsat, getS1\n'), ((32, 15, 32, 32), 'ee.Reducer.mean', 'ee.Reducer.mean', ({}, {}), '()', False, 'import ee\n'), ((84, 18, 84, 42), 'ee.ImageCollection', 'ee.ImageCollection', ({(84, 37, 84, 41): 'name'}, {}), '(name)', False, 'import ee\n'), ((173, 47, 173, 61), 'ee.Number', 'ee.Number', ({(173, 57, 173, 60): '0.1'}, {}), '(0.1)', False, 'import ee\n'), ((174, 8, 174, 22), 'ee.Number', 'ee.Number', ({(174, 18, 174, 21): '0.3'}, {}), '(0.3)', False, 'import ee\n'), ((180, 39, 180, 53), 'ee.Number', 'ee.Number', ({(180, 49, 180, 52): '0.2'}, {}), '(0.2)', False, 'import ee\n'), ((181, 8, 181, 22), 'ee.Number', 'ee.Number', ({(181, 18, 181, 21): '0.8'}, {}), '(0.8)', False, 'import ee\n'), ((187, 41, 187, 55), 'ee.Number', 'ee.Number', ({(187, 51, 187, 54): '0.3'}, {}), '(0.3)', False, 'import ee\n'), ((188, 8, 188, 22), 'ee.Number', 'ee.Number', ({(188, 18, 188, 21): '0.8'}, {}), '(0.8)', False, 'import ee\n'), ((192, 47, 192, 61), 'ee.Number', 'ee.Number', ({(192, 57, 192, 60): '300'}, {}), '(300)', False, 'import ee\n'), ((193, 8, 193, 22), 'ee.Number', 'ee.Number', ({(193, 18, 193, 21): '290'}, {}), '(290)', False, 'import ee\n'), ((198, 33, 198, 47), 'ee.Number', 'ee.Number', ({(198, 43, 198, 46): '0.8'}, {}), '(0.8)', False, 'import ee\n'), ((199, 8, 199, 22), 'ee.Number', 'ee.Number', ({(199, 18, 199, 21): '0.6'}, {}), '(0.6)', False, 'import ee\n'), ((392, 15, 392, 27), 'ee.Image', 'ee.Image', ({(392, 24, 392, 26): '(10)'}, {}), '(10)', False, 'import ee\n'), ((405, 16, 405, 79), 'ee.Filter.listContains', 'ee.Filter.listContains', ({(405, 39, 405, 72): '"""transmitterReceiverPolarisation"""', (405, 74, 405, 78): '"""VH"""'}, {}), "('transmitterReceiverPolarisation', 'VH')", False, 'import ee\n'), ((427, 26, 428, 20), 'ee.ImageCollection', 'ee.ImageCollection', ({(428, 12, 428, 19): 'assetId'}, {}), '(assetId)', False, 'import ee\n'), ((438, 21, 438, 31), 'ee.Image', 'ee.Image', ({}, {}), '()', False, 'import ee\n'), ((439, 27, 439, 42), 'ee.Number', 'ee.Number', ({(439, 37, 439, 41): 'date'}, {}), '(date)', False, 'import ee\n'), ((483, 25, 483, 36), 'ee.Image', 'ee.Image', ({(483, 34, 483, 35): '0'}, {}), '(0)', False, 'import ee\n'), ((578, 21, 578, 31), 'ee.Image', 'ee.Image', ({}, {}), '()', False, 'import ee\n'), ((579, 27, 579, 42), 'ee.Number', 'ee.Number', ({(579, 37, 579, 41): 'date'}, {}), '(date)', False, 'import ee\n'), ((626, 21, 626, 31), 'ee.Image', 'ee.Image', ({}, {}), '()', False, 'import ee\n'), ((628, 13, 628, 28), 'ee.Number', 'ee.Number', ({(628, 23, 628, 27): 'date'}, {}), '(date)', False, 'import ee\n'), ((34, 15, 34, 32), 'ee.Reducer.mode', 'ee.Reducer.mode', ({}, {}), '()', False, 'import ee\n'), ((162, 12, 162, 22), 'ee.Image', 'ee.Image', ({}, {}), '()', False, 'import ee\n'), ((36, 15, 36, 33), 'ee.Reducer.first', 'ee.Reducer.first', ({}, {}), '()', False, 'import ee\n'), ((404, 16, 404, 79), 'ee.Filter.listContains', 'ee.Filter.listContains', ({(404, 39, 404, 72): '"""transmitterReceiverPolarisation"""', (404, 74, 404, 78): '"""VV"""'}, {}), "('transmitterReceiverPolarisation', 'VV')", False, 'import ee\n'), ((424, 26, 424, 53), 'ee.ImageCollection', 'ee.ImageCollection', ({(424, 45, 424, 52): 'assetId'}, {}), '(assetId)', False, 'import ee\n'), ((38, 15, 38, 32), 'ee.Reducer.last', 'ee.Reducer.last', ({}, {}), '()', False, 'import ee\n'), ((247, 10, 247, 51), 'ee.ImageCollection', 'ee.ImageCollection', ({(247, 29, 247, 50): '"""LANDSAT/LT4_L1T_TOA"""'}, {}), "('LANDSAT/LT4_L1T_TOA')", False, 'import ee\n'), ((250, 10, 250, 51), 'ee.ImageCollection', 'ee.ImageCollection', ({(250, 29, 250, 50): '"""LANDSAT/LT5_L1T_TOA"""'}, {}), "('LANDSAT/LT5_L1T_TOA')", False, 'import ee\n'), ((253, 10, 253, 51), 'ee.ImageCollection', 'ee.ImageCollection', ({(253, 29, 253, 50): '"""LANDSAT/LE7_L1T_TOA"""'}, {}), "('LANDSAT/LE7_L1T_TOA')", False, 'import ee\n'), ((256, 10, 256, 55), 'ee.ImageCollection', 'ee.ImageCollection', ({(256, 29, 256, 54): '"""LANDSAT/LC08/C01/T1_TOA"""'}, {}), "('LANDSAT/LC08/C01/T1_TOA')", False, 'import ee\n'), ((509, 19, 509, 43), 'ee.ImageCollection', 'ee.ImageCollection', ({(509, 38, 509, 42): 'name'}, {}), '(name)', False, 'import ee\n'), ((604, 15, 604, 38), 'ee.Image', 'ee.Image', ({(604, 24, 604, 37): 'selectedImage'}, {}), '(selectedImage)', False, 'import ee\n'), ((40, 15, 40, 31), 'ee.Reducer.sum', 'ee.Reducer.sum', ({}, {}), '()', False, 'import ee\n'), ((42, 15, 42, 34), 'ee.Reducer.median', 'ee.Reducer.median', ({}, {}), '()', False, 'import ee\n'), ((231, 64, 231, 78), 'ee.Image', 'ee.Image', ({(231, 73, 231, 77): 'bias'}, {}), '(bias)', False, 'import ee\n'), ((259, 9, 259, 44), 'ee.ImageCollection', 'ee.ImageCollection', ({(259, 28, 259, 43): '"""COPERNICUS/S2"""'}, {}), "('COPERNICUS/S2')", False, 'import ee\n'), ((507, 19, 507, 43), 'ee.ImageCollection', 'ee.ImageCollection', ({(507, 38, 507, 42): 'name'}, {}), '(name)', False, 'import ee\n'), ((231, 22, 231, 36), 'ee.Image', 'ee.Image', ({(231, 31, 231, 35): 'gain'}, {}), '(gain)', False, 'import ee\n'), ((528, 30, 528, 60), 'ee.ImageCollection', 'ee.ImageCollection', ({(528, 49, 528, 59): 'collection'}, {}), '(collection)', False, 'import ee\n')]
shubhamguptaorg/user_managementl | userManagement/management/urls.py | ad98e0e4886d9b0547b05ae424c10d8f6268d470 | from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
from .views import Index, SignUp, UserDashboard, AdminDashboard, logout, showAdminData, deleteuser, activeUser, deactiveUser, UserDetailEdit, uploadImage
# from .views import Index,UserDashboard,SignUp,AdminDashboard
app_name='management'
urlpatterns = [
# path('',homepage,name="index"),
path('',Index.as_view(), name='index'),
path('signup',SignUp.as_view(),name="signup"),
path('userdashboard',UserDashboard.as_view(),name="userDashboard"),
path('admindashboard',AdminDashboard.as_view(),name="adminDashboard"),
path('admindashboard/showuserdata/',showAdminData.as_view(),name='showAdminData'),
path('admindashboard/showuserdata/deleteuser/<userId>',deleteuser,name='deleteuser'),
path('admindashboard/showuserdata/activeUser/<userId>', activeUser, name='activeUser'),
path('admindashboard/showuserdata/deactiveUser/<userId>', deactiveUser, name='deactiveUser'),
path('uploadimage/',uploadImage,name="uploadImage"),
path('editUserDetail/',UserDetailEdit.as_view(),name='userEditDetail'),
path('logout',logout,name='logout')
]
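# With this urlconf included under the 'management' namespace, names reverse
# as e.g. reverse('management:index') or reverse('management:logout')
# (illustrative; uses django.urls.reverse).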
| [((16, 4, 16, 88), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n'), ((17, 4, 17, 90), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n'), ((18, 4, 18, 96), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n'), ((19, 4, 19, 55), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n'), ((21, 4, 21, 39), 'django.urls.path', 'path', (), '', False, 'from django.urls import path, include\n')] |
Branlala/docker-sickbeardfr | sickbeard/lib/hachoir_parser/container/riff.py | 3ac85092dc4cc8a4171fb3c83e9682162245e13e | # -*- coding: UTF-8 -*-
"""
RIFF parser, able to parse:
* AVI video container
* WAV audio container
* CDA file
Documents:
- libavformat source code from ffmpeg library
http://ffmpeg.mplayerhq.hu/
- Video for Windows Programmer's Guide
http://www.opennet.ru/docs/formats/avi.txt
- What is an animated cursor?
http://www.gdgsoft.com/anituner/help/aniformat.htm
Authors:
* Aurélien Jacobs
* Mickaël KENIKSSI
* Victor Stinner
Changelog:
* 2007-03-30: support ACON (animated icons)
* 2006-08-08: merge AVI, WAV and CDA parsers into RIFF parser
* 2006-08-03: creation of CDA parser by Mickaël KENIKSSI
* 2005-06-21: creation of WAV parser by Victor Stinner
* 2005-06-08: creation of AVI parser by Victor Stinner and Aurélien Jacobs
Thanks to:
* Wojtek Kaniewski (wojtekka AT logonet.com.pl) for its CDA file
format information
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, Enum,
Bit, NullBits, NullBytes,
RawBytes, String, PaddingBytes,
SubFile)
from lib.hachoir_core.tools import alignValue, humanDuration
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import filesizeHandler, textHandler
from lib.hachoir_parser.video.fourcc import audio_codec_name, video_fourcc_name
from lib.hachoir_parser.image.ico import IcoFile
from datetime import timedelta
def parseText(self):
yield String(self, "text", self["size"].value,
strip=" \0", truncate="\0",
charset="ISO-8859-1")
def parseRawFormat(self, size):
yield RawBytes(self, "raw_format", size)
def parseVideoFormat(self, size):
yield UInt32(self, "video_size", "Video format: Size")
yield UInt32(self, "width", "Video format: Width")
yield UInt32(self, "height", "Video format: Height")
yield UInt16(self, "panes", "Video format: Panes")
yield UInt16(self, "depth", "Video format: Depth")
yield UInt32(self, "tag1", "Video format: Tag1")
yield UInt32(self, "img_size", "Video format: Image size")
yield UInt32(self, "xpels_meter", "Video format: XPelsPerMeter")
yield UInt32(self, "ypels_meter", "Video format: YPelsPerMeter")
yield UInt32(self, "clr_used", "Video format: ClrUsed")
yield UInt32(self, "clr_important", "Video format: ClrImportant")
def parseAudioFormat(self, size):
yield Enum(UInt16(self, "codec", "Audio format: Codec id"), audio_codec_name)
yield UInt16(self, "channel", "Audio format: Channels")
yield UInt32(self, "sample_rate", "Audio format: Sample rate")
yield UInt32(self, "bit_rate", "Audio format: Bit rate")
yield UInt16(self, "block_align", "Audio format: Block align")
if size >= 16:
yield UInt16(self, "bits_per_sample", "Audio format: Bits per sample")
if size >= 18:
yield UInt16(self, "ext_size", "Audio format: Size of extra information")
if size >= 28: # and self["a_channel"].value > 2
yield UInt16(self, "reserved", "Audio format: ")
yield UInt32(self, "channel_mask", "Audio format: channels placement bitmask")
yield UInt32(self, "subformat", "Audio format: Subformat id")
def parseAVIStreamFormat(self):
size = self["size"].value
strtype = self["../stream_hdr/stream_type"].value
TYPE_HANDLER = {
"vids": (parseVideoFormat, 40),
"auds": (parseAudioFormat, 16)
}
handler = parseRawFormat
if strtype in TYPE_HANDLER:
info = TYPE_HANDLER[strtype]
if info[1] <= size:
handler = info[0]
for field in handler(self, size):
yield field
def parseAVIStreamHeader(self):
if self["size"].value != 56:
raise ParserError("Invalid stream header size")
yield String(self, "stream_type", 4, "Stream type four character code", charset="ASCII")
field = String(self, "fourcc", 4, "Stream four character code", strip=" \0", charset="ASCII")
if self["stream_type"].value == "vids":
yield Enum(field, video_fourcc_name, lambda text: text.upper())
else:
yield field
yield UInt32(self, "flags", "Stream flags")
yield UInt16(self, "priority", "Stream priority")
yield String(self, "language", 2, "Stream language", charset="ASCII", strip="\0")
yield UInt32(self, "init_frames", "InitialFrames")
yield UInt32(self, "scale", "Time scale")
yield UInt32(self, "rate", "Divide by scale to give frame rate")
yield UInt32(self, "start", "Stream start time (unit: rate/scale)")
yield UInt32(self, "length", "Stream length (unit: rate/scale)")
yield UInt32(self, "buf_size", "Suggested buffer size")
yield UInt32(self, "quality", "Stream quality")
yield UInt32(self, "sample_size", "Size of samples")
yield UInt16(self, "left", "Destination rectangle (left)")
yield UInt16(self, "top", "Destination rectangle (top)")
yield UInt16(self, "right", "Destination rectangle (right)")
yield UInt16(self, "bottom", "Destination rectangle (bottom)")
class RedBook(FieldSet):
"""
RedBook offset parser, used in CD audio (.cda) file
"""
def createFields(self):
yield UInt8(self, "frame")
yield UInt8(self, "second")
yield UInt8(self, "minute")
yield PaddingBytes(self, "notused", 1)
def formatSerialNumber(field):
"""
    Format a disc serial number.
Eg. 0x00085C48 => "0008-5C48"
"""
sn = field.value
return "%04X-%04X" % (sn >> 16, sn & 0xFFFF)
def parseCDDA(self):
"""
HSG address format: number of 1/75 second
HSG offset = (minute*60 + second)*75 + frame + 150 (from RB offset)
HSG length = (minute*60 + second)*75 + frame (from RB length)
"""
yield UInt16(self, "cda_version", "CD file version (currently 1)")
yield UInt16(self, "track_no", "Number of track")
yield textHandler(UInt32(self, "disc_serial", "Disc serial number"),
formatSerialNumber)
yield UInt32(self, "hsg_offset", "Track offset (HSG format)")
yield UInt32(self, "hsg_length", "Track length (HSG format)")
yield RedBook(self, "rb_offset", "Track offset (Red-book format)")
yield RedBook(self, "rb_length", "Track length (Red-book format)")
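# Editorial sketch (not part of the original parser): the HSG formulas from
# the docstring above, expressed as a hypothetical helper that converts a
# Red-book (minute, second, frame) triple into an HSG value.
def _redbook_to_hsg(minute, second, frame, is_offset=True):
    """Hypothetical helper: HSG = (minute*60 + second)*75 + frame, +150 for offsets."""
    hsg = (minute * 60 + second) * 75 + frame
    return hsg + 150 if is_offset else hsg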
def parseWAVFormat(self):
size = self["size"].value
if size not in (16, 18):
self.warning("Format with size of %s bytes is not supported!" % size)
yield Enum(UInt16(self, "codec", "Audio codec"), audio_codec_name)
yield UInt16(self, "nb_channel", "Number of audio channel")
yield UInt32(self, "sample_per_sec", "Sample per second")
yield UInt32(self, "byte_per_sec", "Average byte per second")
yield UInt16(self, "block_align", "Block align")
yield UInt16(self, "bit_per_sample", "Bits per sample")
def parseWAVFact(self):
yield UInt32(self, "nb_sample", "Number of samples in audio stream")
def parseAviHeader(self):
yield UInt32(self, "microsec_per_frame", "Microsecond per frame")
yield UInt32(self, "max_byte_per_sec", "Maximum byte per second")
yield NullBytes(self, "reserved", 4)
# Flags
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "has_index")
yield Bit(self, "must_use_index")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "is_interleaved")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "trust_cktype")
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "was_capture_file")
yield Bit(self, "is_copyrighted")
yield NullBits(self, "reserved[]", 14)
yield UInt32(self, "total_frame", "Total number of frames in the video")
yield UInt32(self, "init_frame", "Initial frame (used in interleaved video)")
yield UInt32(self, "nb_stream", "Number of streams")
yield UInt32(self, "sug_buf_size", "Suggested buffer size")
yield UInt32(self, "width", "Width in pixel")
yield UInt32(self, "height", "Height in pixel")
yield UInt32(self, "scale")
yield UInt32(self, "rate")
yield UInt32(self, "start")
yield UInt32(self, "length")
def parseODML(self):
yield UInt32(self, "total_frame", "Real number of frame of OpenDML video")
padding = self["size"].value - 4
if 0 < padding:
yield NullBytes(self, "padding[]", padding)
class AVIIndexEntry(FieldSet):
size = 16*8
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield UInt32(self, "flags")
yield UInt32(self, "start", "Offset from start of movie data")
yield UInt32(self, "length")
def parseIndex(self):
while not self.eof:
yield AVIIndexEntry(self, "index[]")
class Chunk(FieldSet):
TAG_INFO = {
        # This dictionary is edited by RiffFile.validate()
"LIST": ("list[]", None, "Sub-field list"),
"JUNK": ("junk[]", None, "Junk (padding)"),
# Metadata
"INAM": ("title", parseText, "Document title"),
"IART": ("artist", parseText, "Artist"),
"ICMT": ("comment", parseText, "Comment"),
"ICOP": ("copyright", parseText, "Copyright"),
"IENG": ("author", parseText, "Author"),
"ICRD": ("creation_date", parseText, "Creation date"),
"ISFT": ("producer", parseText, "Producer"),
"IDIT": ("datetime", parseText, "Date time"),
        # TODO: see below
# "strn": Stream description
# TWOCC code, movie/field[]/tag.value[2:4]:
# "db": "Uncompressed video frame",
# "dc": "Compressed video frame",
# "wb": "Audio data",
# "pc": "Palette change"
}
subtag_info = {
"INFO": ("info", "File informations"),
"hdrl": ("headers", "Headers"),
"strl": ("stream[]", "Stream header list"),
"movi": ("movie", "Movie stream"),
"odml": ("odml", "ODML"),
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (8 + alignValue(self["size"].value, 2)) * 8
tag = self["tag"].value
if tag in self.TAG_INFO:
self.tag_info = self.TAG_INFO[tag]
if tag == "LIST":
subtag = self["subtag"].value
if subtag in self.subtag_info:
info = self.subtag_info[subtag]
self.tag_info = (info[0], None, info[1])
self._name = self.tag_info[0]
self._description = self.tag_info[2]
else:
self.tag_info = ("field[]", None, None)
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield filesizeHandler(UInt32(self, "size", "Size"))
if not self["size"].value:
return
if self["tag"].value == "LIST":
yield String(self, "subtag", 4, "Sub-tag", charset="ASCII")
handler = self.tag_info[1]
while 8 < (self.size - self.current_size)/8:
field = self.__class__(self, "field[]")
yield field
if (field.size/8) % 2 != 0:
yield UInt8(self, "padding[]", "Padding")
else:
handler = self.tag_info[1]
if handler:
for field in handler(self):
yield field
else:
yield RawBytes(self, "raw_content", self["size"].value)
padding = self.seekBit(self._size)
if padding:
yield padding
def createDescription(self):
tag = self["tag"].display
return u"Chunk (tag %s)" % tag
class ChunkAVI(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
"strh": ("stream_hdr", parseAVIStreamHeader, "Stream header"),
"strf": ("stream_fmt", parseAVIStreamFormat, "Stream format"),
"avih": ("avi_hdr", parseAviHeader, "AVI header"),
"idx1": ("index", parseIndex, "Stream index"),
"dmlh": ("odml_hdr", parseODML, "ODML header"),
})
class ChunkCDDA(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
        'fmt ': ("cdda", parseCDDA, "CD audio information"),
})
class ChunkWAVE(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'fmt ': ("format", parseWAVFormat, "Audio format"),
'fact': ("nb_sample", parseWAVFact, "Number of samples"),
'data': ("audio_data", None, "Audio stream data"),
})
def parseAnimationHeader(self):
yield UInt32(self, "hdr_size", "Size of header (36 bytes)")
if self["hdr_size"].value != 36:
self.warning("Animation header with unknown size (%s)" % self["size"].value)
yield UInt32(self, "nb_frame", "Number of unique Icons in this cursor")
yield UInt32(self, "nb_step", "Number of Blits before the animation cycles")
yield UInt32(self, "cx")
yield UInt32(self, "cy")
yield UInt32(self, "bit_count")
yield UInt32(self, "planes")
yield UInt32(self, "jiffie_rate", "Default Jiffies (1/60th of a second) if rate chunk not present")
yield Bit(self, "is_icon")
yield NullBits(self, "padding", 31)
def parseAnimationSequence(self):
while not self.eof:
yield UInt32(self, "icon[]")
def formatJiffie(field):
sec = float(field.value) / 60
return humanDuration(timedelta(seconds=sec))
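# Editorial illustration: a jiffie is 1/60th of a second, so a rate value of
# 90 corresponds to timedelta(seconds=1.5) before humanDuration() renders it.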
def parseAnimationRate(self):
while not self.eof:
yield textHandler(UInt32(self, "rate[]"), formatJiffie)
def parseIcon(self):
yield SubFile(self, "icon_file", self["size"].value, parser_class=IcoFile)
class ChunkACON(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'anih': ("anim_hdr", parseAnimationHeader, "Animation header"),
'seq ': ("anim_seq", parseAnimationSequence, "Animation sequence"),
        'rate': ("anim_rate", parseAnimationRate, "Animation rate"),
'icon': ("icon[]", parseIcon, "Icon"),
})
class RiffFile(Parser):
PARSER_TAGS = {
"id": "riff",
"category": "container",
"file_ext": ("avi", "cda", "wav", "ani"),
"min_size": 16*8,
"mime": (u"video/x-msvideo", u"audio/x-wav", u"audio/x-cda"),
# FIXME: Use regex "RIFF.{4}(WAVE|CDDA|AVI )"
"magic": (
("AVI LIST", 8*8),
("WAVEfmt ", 8*8),
("CDDAfmt ", 8*8),
("ACONanih", 8*8),
),
"description": "Microsoft RIFF container"
}
VALID_TYPES = {
"WAVE": (ChunkWAVE, u"audio/x-wav", u"Microsoft WAVE audio", ".wav"),
"CDDA": (ChunkCDDA, u"audio/x-cda", u"Microsoft Windows audio CD file (cda)", ".cda"),
"AVI ": (ChunkAVI, u"video/x-msvideo", u"Microsoft AVI video", ".avi"),
"ACON": (ChunkACON, u"image/x-ani", u"Microsoft Windows animated cursor", ".ani"),
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "RIFF":
return "Wrong signature"
if self["type"].value not in self.VALID_TYPES:
return "Unknown RIFF content type"
return True
def createFields(self):
yield String(self, "signature", 4, "AVI header (RIFF)", charset="ASCII")
yield filesizeHandler(UInt32(self, "filesize", "File size"))
yield String(self, "type", 4, "Content type (\"AVI \", \"WAVE\", ...)", charset="ASCII")
# Choose chunk type depending on file type
try:
chunk_cls = self.VALID_TYPES[self["type"].value][0]
except KeyError:
chunk_cls = Chunk
# Parse all chunks up to filesize
while self.current_size < self["filesize"].value*8+8:
yield chunk_cls(self, "chunk[]")
if not self.eof:
yield RawBytes(self, "padding[]", (self.size-self.current_size)/8)
def createMimeType(self):
try:
return self.VALID_TYPES[self["type"].value][1]
except KeyError:
return None
def createDescription(self):
tag = self["type"].value
if tag == "AVI ":
desc = u"Microsoft AVI video"
if "headers/avi_hdr" in self:
header = self["headers/avi_hdr"]
desc += ": %ux%u pixels" % (header["width"].value, header["height"].value)
microsec = header["microsec_per_frame"].value
if microsec:
desc += ", %.1f fps" % (1000000.0 / microsec)
if "total_frame" in header and header["total_frame"].value:
delta = timedelta(seconds=float(header["total_frame"].value) * microsec)
desc += ", " + humanDuration(delta)
return desc
else:
try:
return self.VALID_TYPES[tag][2]
except KeyError:
return u"Microsoft RIFF container"
def createContentSize(self):
size = (self["filesize"].value + 8) * 8
return min(size, self.stream.size)
def createFilenameSuffix(self):
try:
return self.VALID_TYPES[self["type"].value][3]
except KeyError:
return ".riff"
| [((100, 12, 100, 97), 'lib.hachoir_core.field.String', 'String', (), '', False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((46, 10, 48, 29), 'lib.hachoir_core.field.String', 'String', (), '', False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((51, 10, 51, 44), 'lib.hachoir_core.field.RawBytes', 'RawBytes', ({(51, 19, 51, 23): 'self', (51, 25, 51, 37): '"""raw_format"""', (51, 39, 51, 43): 'size'}, {}), "(self, 'raw_format', size)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((54, 10, 54, 58), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(54, 17, 54, 21): 'self', (54, 23, 54, 35): '"""video_size"""', (54, 37, 54, 57): '"""Video format: Size"""'}, {}), "(self, 'video_size', 'Video format: Size')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((55, 10, 55, 54), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(55, 17, 55, 21): 'self', (55, 23, 55, 30): '"""width"""', (55, 32, 55, 53): '"""Video format: Width"""'}, {}), "(self, 'width', 'Video format: Width')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((56, 10, 56, 56), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(56, 17, 56, 21): 'self', (56, 23, 56, 31): '"""height"""', (56, 33, 56, 55): '"""Video format: Height"""'}, {}), "(self, 'height', 'Video format: Height')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((57, 10, 57, 54), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(57, 17, 57, 21): 'self', (57, 23, 57, 30): '"""panes"""', (57, 32, 57, 53): '"""Video format: Panes"""'}, {}), "(self, 'panes', 'Video format: Panes')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((58, 10, 58, 54), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(58, 17, 58, 21): 'self', (58, 23, 58, 30): '"""depth"""', (58, 32, 58, 53): '"""Video format: Depth"""'}, {}), "(self, 'depth', 'Video format: Depth')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((59, 10, 59, 52), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(59, 17, 59, 21): 'self', (59, 23, 59, 29): '"""tag1"""', (59, 31, 59, 51): '"""Video format: Tag1"""'}, {}), "(self, 'tag1', 'Video format: Tag1')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((60, 10, 60, 62), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(60, 17, 60, 21): 'self', (60, 23, 60, 33): '"""img_size"""', (60, 35, 60, 61): '"""Video format: Image size"""'}, {}), "(self, 'img_size', 'Video format: Image size')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((61, 
10, 61, 68), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(61, 17, 61, 21): 'self', (61, 23, 61, 36): '"""xpels_meter"""', (61, 38, 61, 67): '"""Video format: XPelsPerMeter"""'}, {}), "(self, 'xpels_meter', 'Video format: XPelsPerMeter')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((62, 10, 62, 68), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(62, 17, 62, 21): 'self', (62, 23, 62, 36): '"""ypels_meter"""', (62, 38, 62, 67): '"""Video format: YPelsPerMeter"""'}, {}), "(self, 'ypels_meter', 'Video format: YPelsPerMeter')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((63, 10, 63, 59), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(63, 17, 63, 21): 'self', (63, 23, 63, 33): '"""clr_used"""', (63, 35, 63, 58): '"""Video format: ClrUsed"""'}, {}), "(self, 'clr_used', 'Video format: ClrUsed')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((64, 10, 64, 69), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(64, 17, 64, 21): 'self', (64, 23, 64, 38): '"""clr_important"""', (64, 40, 64, 68): '"""Video format: ClrImportant"""'}, {}), "(self, 'clr_important', 'Video format: ClrImportant')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((68, 10, 68, 59), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(68, 17, 68, 21): 'self', (68, 23, 68, 32): '"""channel"""', (68, 34, 68, 58): '"""Audio format: Channels"""'}, {}), "(self, 'channel', 'Audio format: Channels')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((69, 10, 69, 66), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(69, 17, 69, 21): 'self', (69, 23, 69, 36): '"""sample_rate"""', (69, 38, 69, 65): '"""Audio format: Sample rate"""'}, {}), "(self, 'sample_rate', 'Audio format: Sample rate')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((70, 10, 70, 60), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(70, 17, 70, 21): 'self', (70, 23, 70, 33): '"""bit_rate"""', (70, 35, 70, 59): '"""Audio format: Bit rate"""'}, {}), "(self, 'bit_rate', 'Audio format: Bit rate')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((71, 10, 71, 66), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(71, 17, 71, 21): 'self', (71, 23, 71, 36): '"""block_align"""', (71, 38, 71, 65): '"""Audio format: Block align"""'}, {}), "(self, 'block_align', 'Audio format: Block align')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((98, 14, 98, 55), 'lib.hachoir_core.field.ParserError', 'ParserError', ({(98, 26, 98, 54): '"""Invalid stream header size"""'}, {}), "('Invalid stream header size')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, 
SubFile\n'), ((99, 10, 99, 92), 'lib.hachoir_core.field.String', 'String', (), '', False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((105, 10, 105, 47), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(105, 17, 105, 21): 'self', (105, 23, 105, 30): '"""flags"""', (105, 32, 105, 46): '"""Stream flags"""'}, {}), "(self, 'flags', 'Stream flags')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((106, 10, 106, 53), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(106, 17, 106, 21): 'self', (106, 23, 106, 33): '"""priority"""', (106, 35, 106, 52): '"""Stream priority"""'}, {}), "(self, 'priority', 'Stream priority')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((107, 10, 107, 85), 'lib.hachoir_core.field.String', 'String', (), '', False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((108, 10, 108, 54), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(108, 17, 108, 21): 'self', (108, 23, 108, 36): '"""init_frames"""', (108, 38, 108, 53): '"""InitialFrames"""'}, {}), "(self, 'init_frames', 'InitialFrames')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((109, 10, 109, 45), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(109, 17, 109, 21): 'self', (109, 23, 109, 30): '"""scale"""', (109, 32, 109, 44): '"""Time scale"""'}, {}), "(self, 'scale', 'Time scale')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((110, 10, 110, 68), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(110, 17, 110, 21): 'self', (110, 23, 110, 29): '"""rate"""', (110, 31, 110, 67): '"""Divide by scale to give frame rate"""'}, {}), "(self, 'rate', 'Divide by scale to give frame rate')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((111, 10, 111, 71), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(111, 17, 111, 21): 'self', (111, 23, 111, 30): '"""start"""', (111, 32, 111, 70): '"""Stream start time (unit: rate/scale)"""'}, {}), "(self, 'start', 'Stream start time (unit: rate/scale)')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((112, 10, 112, 68), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(112, 17, 112, 21): 'self', (112, 23, 112, 31): '"""length"""', (112, 33, 112, 67): '"""Stream length (unit: rate/scale)"""'}, {}), "(self, 'length', 'Stream length (unit: rate/scale)')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((113, 10, 113, 59), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(113, 17, 113, 21): 'self', (113, 23, 113, 33): '"""buf_size"""', (113, 35, 113, 58): '"""Suggested buffer size"""'}, {}), "(self, 'buf_size', 'Suggested buffer size')", False, 'from 
lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((114, 10, 114, 51), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(114, 17, 114, 21): 'self', (114, 23, 114, 32): '"""quality"""', (114, 34, 114, 50): '"""Stream quality"""'}, {}), "(self, 'quality', 'Stream quality')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((115, 10, 115, 56), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(115, 17, 115, 21): 'self', (115, 23, 115, 36): '"""sample_size"""', (115, 38, 115, 55): '"""Size of samples"""'}, {}), "(self, 'sample_size', 'Size of samples')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((116, 10, 116, 62), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(116, 17, 116, 21): 'self', (116, 23, 116, 29): '"""left"""', (116, 31, 116, 61): '"""Destination rectangle (left)"""'}, {}), "(self, 'left', 'Destination rectangle (left)')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((117, 10, 117, 60), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(117, 17, 117, 21): 'self', (117, 23, 117, 28): '"""top"""', (117, 30, 117, 59): '"""Destination rectangle (top)"""'}, {}), "(self, 'top', 'Destination rectangle (top)')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((118, 10, 118, 64), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(118, 17, 118, 21): 'self', (118, 23, 118, 30): '"""right"""', (118, 32, 118, 63): '"""Destination rectangle (right)"""'}, {}), "(self, 'right', 'Destination rectangle (right)')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((119, 10, 119, 66), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(119, 17, 119, 21): 'self', (119, 23, 119, 31): '"""bottom"""', (119, 33, 119, 65): '"""Destination rectangle (bottom)"""'}, {}), "(self, 'bottom', 'Destination rectangle (bottom)')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((146, 10, 146, 70), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(146, 17, 146, 21): 'self', (146, 23, 146, 36): '"""cda_version"""', (146, 38, 146, 69): '"""CD file version (currently 1)"""'}, {}), "(self, 'cda_version', 'CD file version (currently 1)')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((147, 10, 147, 53), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(147, 17, 147, 21): 'self', (147, 23, 147, 33): '"""track_no"""', (147, 35, 147, 52): '"""Number of track"""'}, {}), "(self, 'track_no', 'Number of track')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((150, 10, 150, 65), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(150, 17, 150, 21): 'self', (150, 23, 150, 35): '"""hsg_offset"""', (150, 37, 150, 64): '"""Track 
offset (HSG format)"""'}, {}), "(self, 'hsg_offset', 'Track offset (HSG format)')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((151, 10, 151, 65), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(151, 17, 151, 21): 'self', (151, 23, 151, 35): '"""hsg_length"""', (151, 37, 151, 64): '"""Track length (HSG format)"""'}, {}), "(self, 'hsg_length', 'Track length (HSG format)')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((160, 10, 160, 63), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(160, 17, 160, 21): 'self', (160, 23, 160, 35): '"""nb_channel"""', (160, 37, 160, 62): '"""Number of audio channel"""'}, {}), "(self, 'nb_channel', 'Number of audio channel')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((161, 10, 161, 61), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(161, 17, 161, 21): 'self', (161, 23, 161, 39): '"""sample_per_sec"""', (161, 41, 161, 60): '"""Sample per second"""'}, {}), "(self, 'sample_per_sec', 'Sample per second')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((162, 10, 162, 65), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(162, 17, 162, 21): 'self', (162, 23, 162, 37): '"""byte_per_sec"""', (162, 39, 162, 64): '"""Average byte per second"""'}, {}), "(self, 'byte_per_sec', 'Average byte per second')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((163, 10, 163, 52), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(163, 17, 163, 21): 'self', (163, 23, 163, 36): '"""block_align"""', (163, 38, 163, 51): '"""Block align"""'}, {}), "(self, 'block_align', 'Block align')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((164, 10, 164, 59), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(164, 17, 164, 21): 'self', (164, 23, 164, 39): '"""bit_per_sample"""', (164, 41, 164, 58): '"""Bits per sample"""'}, {}), "(self, 'bit_per_sample', 'Bits per sample')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((167, 10, 167, 72), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(167, 17, 167, 21): 'self', (167, 23, 167, 34): '"""nb_sample"""', (167, 36, 167, 71): '"""Number of samples in audio stream"""'}, {}), "(self, 'nb_sample', 'Number of samples in audio stream')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((170, 10, 170, 69), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(170, 17, 170, 21): 'self', (170, 23, 170, 43): '"""microsec_per_frame"""', (170, 45, 170, 68): '"""Microsecond per frame"""'}, {}), "(self, 'microsec_per_frame', 'Microsecond per frame')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((171, 10, 171, 69), 
'lib.hachoir_core.field.UInt32', 'UInt32', ({(171, 17, 171, 21): 'self', (171, 23, 171, 41): '"""max_byte_per_sec"""', (171, 43, 171, 68): '"""Maximum byte per second"""'}, {}), "(self, 'max_byte_per_sec', 'Maximum byte per second')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((172, 10, 172, 40), 'lib.hachoir_core.field.NullBytes', 'NullBytes', ({(172, 20, 172, 24): 'self', (172, 26, 172, 36): '"""reserved"""', (172, 38, 172, 39): '(4)'}, {}), "(self, 'reserved', 4)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((175, 10, 175, 41), 'lib.hachoir_core.field.NullBits', 'NullBits', ({(175, 19, 175, 23): 'self', (175, 25, 175, 37): '"""reserved[]"""', (175, 39, 175, 40): '(4)'}, {}), "(self, 'reserved[]', 4)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((176, 10, 176, 32), 'lib.hachoir_core.field.Bit', 'Bit', ({(176, 14, 176, 18): 'self', (176, 20, 176, 31): '"""has_index"""'}, {}), "(self, 'has_index')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((177, 10, 177, 37), 'lib.hachoir_core.field.Bit', 'Bit', ({(177, 14, 177, 18): 'self', (177, 20, 177, 36): '"""must_use_index"""'}, {}), "(self, 'must_use_index')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((178, 10, 178, 41), 'lib.hachoir_core.field.NullBits', 'NullBits', ({(178, 19, 178, 23): 'self', (178, 25, 178, 37): '"""reserved[]"""', (178, 39, 178, 40): '(2)'}, {}), "(self, 'reserved[]', 2)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((179, 10, 179, 37), 'lib.hachoir_core.field.Bit', 'Bit', ({(179, 14, 179, 18): 'self', (179, 20, 179, 36): '"""is_interleaved"""'}, {}), "(self, 'is_interleaved')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((180, 10, 180, 41), 'lib.hachoir_core.field.NullBits', 'NullBits', ({(180, 19, 180, 23): 'self', (180, 25, 180, 37): '"""reserved[]"""', (180, 39, 180, 40): '(2)'}, {}), "(self, 'reserved[]', 2)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((181, 10, 181, 35), 'lib.hachoir_core.field.Bit', 'Bit', ({(181, 14, 181, 18): 'self', (181, 20, 181, 34): '"""trust_cktype"""'}, {}), "(self, 'trust_cktype')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((182, 10, 182, 41), 'lib.hachoir_core.field.NullBits', 'NullBits', ({(182, 19, 182, 23): 'self', (182, 25, 182, 37): '"""reserved[]"""', (182, 39, 182, 40): '(4)'}, {}), "(self, 'reserved[]', 4)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((183, 10, 183, 39), 
'lib.hachoir_core.field.Bit', 'Bit', ({(183, 14, 183, 18): 'self', (183, 20, 183, 38): '"""was_capture_file"""'}, {}), "(self, 'was_capture_file')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((184, 10, 184, 37), 'lib.hachoir_core.field.Bit', 'Bit', ({(184, 14, 184, 18): 'self', (184, 20, 184, 36): '"""is_copyrighted"""'}, {}), "(self, 'is_copyrighted')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((185, 10, 185, 42), 'lib.hachoir_core.field.NullBits', 'NullBits', ({(185, 19, 185, 23): 'self', (185, 25, 185, 37): '"""reserved[]"""', (185, 39, 185, 41): '(14)'}, {}), "(self, 'reserved[]', 14)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((187, 10, 187, 76), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(187, 17, 187, 21): 'self', (187, 23, 187, 36): '"""total_frame"""', (187, 38, 187, 75): '"""Total number of frames in the video"""'}, {}), "(self, 'total_frame', 'Total number of frames in the video')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((188, 10, 188, 81), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(188, 17, 188, 21): 'self', (188, 23, 188, 35): '"""init_frame"""', (188, 37, 188, 80): '"""Initial frame (used in interleaved video)"""'}, {}), "(self, 'init_frame', 'Initial frame (used in interleaved video)')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((189, 10, 189, 56), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(189, 17, 189, 21): 'self', (189, 23, 189, 34): '"""nb_stream"""', (189, 36, 189, 55): '"""Number of streams"""'}, {}), "(self, 'nb_stream', 'Number of streams')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((190, 10, 190, 63), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(190, 17, 190, 21): 'self', (190, 23, 190, 37): '"""sug_buf_size"""', (190, 39, 190, 62): '"""Suggested buffer size"""'}, {}), "(self, 'sug_buf_size', 'Suggested buffer size')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((191, 10, 191, 49), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(191, 17, 191, 21): 'self', (191, 23, 191, 30): '"""width"""', (191, 32, 191, 48): '"""Width in pixel"""'}, {}), "(self, 'width', 'Width in pixel')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((192, 10, 192, 51), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(192, 17, 192, 21): 'self', (192, 23, 192, 31): '"""height"""', (192, 33, 192, 50): '"""Height in pixel"""'}, {}), "(self, 'height', 'Height in pixel')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((193, 10, 193, 31), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(193, 17, 193, 21): 
'self', (193, 23, 193, 30): '"""scale"""'}, {}), "(self, 'scale')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((194, 10, 194, 30), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(194, 17, 194, 21): 'self', (194, 23, 194, 29): '"""rate"""'}, {}), "(self, 'rate')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((195, 10, 195, 31), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(195, 17, 195, 21): 'self', (195, 23, 195, 30): '"""start"""'}, {}), "(self, 'start')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((196, 10, 196, 32), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(196, 17, 196, 21): 'self', (196, 23, 196, 31): '"""length"""'}, {}), "(self, 'length')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((199, 10, 199, 78), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(199, 17, 199, 21): 'self', (199, 23, 199, 36): '"""total_frame"""', (199, 38, 199, 77): '"""Real number of frame of OpenDML video"""'}, {}), "(self, 'total_frame', 'Real number of frame of OpenDML video')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((251, 8, 251, 44), 'lib.hachoir_core.field.FieldSet.__init__', 'FieldSet.__init__', ({(251, 26, 251, 30): 'self', (251, 32, 251, 37): '*args'}, {}), '(self, *args, **kw)', False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((319, 10, 319, 63), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(319, 17, 319, 21): 'self', (319, 23, 319, 33): '"""hdr_size"""', (319, 35, 319, 62): '"""Size of header (36 bytes)"""'}, {}), "(self, 'hdr_size', 'Size of header (36 bytes)')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((322, 10, 322, 75), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(322, 17, 322, 21): 'self', (322, 23, 322, 33): '"""nb_frame"""', (322, 35, 322, 74): '"""Number of unique Icons in this cursor"""'}, {}), "(self, 'nb_frame', 'Number of unique Icons in this cursor')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((323, 10, 323, 80), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(323, 17, 323, 21): 'self', (323, 23, 323, 32): '"""nb_step"""', (323, 34, 323, 79): '"""Number of Blits before the animation cycles"""'}, {}), "(self, 'nb_step', 'Number of Blits before the animation cycles')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((324, 10, 324, 28), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(324, 17, 324, 21): 'self', (324, 23, 324, 27): '"""cx"""'}, {}), "(self, 'cx')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, 
PaddingBytes, SubFile\n'), ((325, 10, 325, 28), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(325, 17, 325, 21): 'self', (325, 23, 325, 27): '"""cy"""'}, {}), "(self, 'cy')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((326, 10, 326, 35), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(326, 17, 326, 21): 'self', (326, 23, 326, 34): '"""bit_count"""'}, {}), "(self, 'bit_count')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((327, 10, 327, 32), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(327, 17, 327, 21): 'self', (327, 23, 327, 31): '"""planes"""'}, {}), "(self, 'planes')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((328, 10, 328, 103), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(328, 17, 328, 21): 'self', (328, 23, 328, 36): '"""jiffie_rate"""', (328, 38, 328, 102): '"""Default Jiffies (1/60th of a second) if rate chunk not present"""'}, {}), "(self, 'jiffie_rate',\n 'Default Jiffies (1/60th of a second) if rate chunk not present')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((329, 10, 329, 30), 'lib.hachoir_core.field.Bit', 'Bit', ({(329, 14, 329, 18): 'self', (329, 20, 329, 29): '"""is_icon"""'}, {}), "(self, 'is_icon')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((330, 10, 330, 39), 'lib.hachoir_core.field.NullBits', 'NullBits', ({(330, 19, 330, 23): 'self', (330, 25, 330, 34): '"""padding"""', (330, 36, 330, 38): '(31)'}, {}), "(self, 'padding', 31)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((338, 25, 338, 47), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n'), ((345, 10, 345, 78), 'lib.hachoir_core.field.SubFile', 'SubFile', (), '', False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((67, 15, 67, 62), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(67, 22, 67, 26): 'self', (67, 28, 67, 35): '"""codec"""', (67, 37, 67, 61): '"""Audio format: Codec id"""'}, {}), "(self, 'codec', 'Audio format: Codec id')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((73, 14, 73, 78), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(73, 21, 73, 25): 'self', (73, 27, 73, 44): '"""bits_per_sample"""', (73, 46, 73, 77): '"""Audio format: Bits per sample"""'}, {}), "(self, 'bits_per_sample', 'Audio format: Bits per sample')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((75, 14, 75, 81), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(75, 21, 75, 25): 'self', (75, 27, 75, 37): '"""ext_size"""', (75, 39, 75, 80): '"""Audio format: Size of extra information"""'}, {}), "(self, 'ext_size', 'Audio 
format: Size of extra information')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((77, 14, 77, 56), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(77, 21, 77, 25): 'self', (77, 27, 77, 37): '"""reserved"""', (77, 39, 77, 55): '"""Audio format: """'}, {}), "(self, 'reserved', 'Audio format: ')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((78, 14, 78, 86), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(78, 21, 78, 25): 'self', (78, 27, 78, 41): '"""channel_mask"""', (78, 43, 78, 85): '"""Audio format: channels placement bitmask"""'}, {}), "(self, 'channel_mask', 'Audio format: channels placement bitmask')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((79, 14, 79, 69), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(79, 21, 79, 25): 'self', (79, 27, 79, 38): '"""subformat"""', (79, 40, 79, 68): '"""Audio format: Subformat id"""'}, {}), "(self, 'subformat', 'Audio format: Subformat id')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((126, 14, 126, 34), 'lib.hachoir_core.field.UInt8', 'UInt8', ({(126, 20, 126, 24): 'self', (126, 26, 126, 33): '"""frame"""'}, {}), "(self, 'frame')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((127, 14, 127, 35), 'lib.hachoir_core.field.UInt8', 'UInt8', ({(127, 20, 127, 24): 'self', (127, 26, 127, 34): '"""second"""'}, {}), "(self, 'second')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((128, 14, 128, 35), 'lib.hachoir_core.field.UInt8', 'UInt8', ({(128, 20, 128, 24): 'self', (128, 26, 128, 34): '"""minute"""'}, {}), "(self, 'minute')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((129, 14, 129, 46), 'lib.hachoir_core.field.PaddingBytes', 'PaddingBytes', ({(129, 27, 129, 31): 'self', (129, 33, 129, 42): '"""notused"""', (129, 44, 129, 45): '(1)'}, {}), "(self, 'notused', 1)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((148, 22, 148, 71), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(148, 29, 148, 33): 'self', (148, 35, 148, 48): '"""disc_serial"""', (148, 50, 148, 70): '"""Disc serial number"""'}, {}), "(self, 'disc_serial', 'Disc serial number')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((159, 15, 159, 51), 'lib.hachoir_core.field.UInt16', 'UInt16', ({(159, 22, 159, 26): 'self', (159, 28, 159, 35): '"""codec"""', (159, 37, 159, 50): '"""Audio codec"""'}, {}), "(self, 'codec', 'Audio codec')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((202, 14, 202, 51), 
'lib.hachoir_core.field.NullBytes', 'NullBytes', ({(202, 24, 202, 28): 'self', (202, 30, 202, 41): '"""padding[]"""', (202, 43, 202, 50): 'padding'}, {}), "(self, 'padding[]', padding)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((207, 14, 207, 60), 'lib.hachoir_core.field.String', 'String', (), '', False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((208, 14, 208, 35), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(208, 21, 208, 25): 'self', (208, 27, 208, 34): '"""flags"""'}, {}), "(self, 'flags')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((209, 14, 209, 70), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(209, 21, 209, 25): 'self', (209, 27, 209, 34): '"""start"""', (209, 36, 209, 69): '"""Offset from start of movie data"""'}, {}), "(self, 'start', 'Offset from start of movie data')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((210, 14, 210, 36), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(210, 21, 210, 25): 'self', (210, 27, 210, 35): '"""length"""'}, {}), "(self, 'length')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((267, 14, 267, 60), 'lib.hachoir_core.field.String', 'String', (), '', False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((334, 14, 334, 36), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(334, 21, 334, 25): 'self', (334, 27, 334, 35): '"""icon[]"""'}, {}), "(self, 'icon[]')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((388, 14, 388, 80), 'lib.hachoir_core.field.String', 'String', (), '', False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((390, 14, 390, 96), 'lib.hachoir_core.field.String', 'String', (), '', False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((252, 26, 252, 59), 'lib.hachoir_core.tools.alignValue', 'alignValue', ({(252, 37, 252, 55): "self['size'].value", (252, 57, 252, 58): '(2)'}, {}), "(self['size'].value, 2)", False, 'from lib.hachoir_core.tools import alignValue, humanDuration\n'), ((268, 30, 268, 58), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(268, 37, 268, 41): 'self', (268, 43, 268, 49): '"""size"""', (268, 51, 268, 57): '"""Size"""'}, {}), "(self, 'size', 'Size')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((272, 18, 272, 71), 'lib.hachoir_core.field.String', 'String', (), '', False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((342, 26, 342, 48), 
'lib.hachoir_core.field.UInt32', 'UInt32', ({(342, 33, 342, 37): 'self', (342, 39, 342, 47): '"""rate[]"""'}, {}), "(self, 'rate[]')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((389, 30, 389, 67), 'lib.hachoir_core.field.UInt32', 'UInt32', ({(389, 37, 389, 41): 'self', (389, 43, 389, 53): '"""filesize"""', (389, 55, 389, 66): '"""File size"""'}, {}), "(self, 'filesize', 'File size')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((402, 18, 402, 78), 'lib.hachoir_core.field.RawBytes', 'RawBytes', ({(402, 27, 402, 31): 'self', (402, 33, 402, 44): '"""padding[]"""', (402, 46, 402, 77): '((self.size - self.current_size) / 8)'}, {}), "(self, 'padding[]', (self.size - self.current_size) / 8)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((285, 22, 285, 71), 'lib.hachoir_core.field.RawBytes', 'RawBytes', ({(285, 31, 285, 35): 'self', (285, 37, 285, 50): '"""raw_content"""', (285, 52, 285, 70): "self['size'].value"}, {}), "(self, 'raw_content', self['size'].value)", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((278, 26, 278, 61), 'lib.hachoir_core.field.UInt8', 'UInt8', ({(278, 32, 278, 36): 'self', (278, 38, 278, 49): '"""padding[]"""', (278, 51, 278, 60): '"""Padding"""'}, {}), "(self, 'padding[]', 'Padding')", False, 'from lib.hachoir_core.field import FieldSet, ParserError, UInt8, UInt16, UInt32, Enum, Bit, NullBits, NullBytes, RawBytes, String, PaddingBytes, SubFile\n'), ((422, 39, 422, 59), 'lib.hachoir_core.tools.humanDuration', 'humanDuration', ({(422, 53, 422, 58): 'delta'}, {}), '(delta)', False, 'from lib.hachoir_core.tools import alignValue, humanDuration\n')] |
MartinEngen/NaiveBayesianClassifier | Utils.py | a28813708a4d2adcdcd629e6d4d8b4f438a9c799 | import os
import re
def get_subfolder_paths(folder_relative_path: str) -> list:
"""
Gets all subfolders of a given path
:param folder_relative_path: Relative path of folder to find subfolders of
:return: list of relative paths to any subfolders
"""
return [f.path for f in os.scandir(folder_relative_path) if f.is_dir()]
def get_group_name(group_path: str) -> str:
    """Return the final component of a Windows-style (backslash-separated) path."""
    return group_path.split("\\")[-1]
def replace_unwanted_characters(line: str) -> str:
return re.sub(
r'([^\s\w]|_)+',
u' ',
line.replace('\n', ' ').replace('\t', ' '),
flags=re.UNICODE
)
def clean_document(document_file) -> list:
    """Lowercase a document, skip its first blank-line-separated block (the
    header), strip unwanted characters and return the remaining words as a
    flat list."""
    document = document_file.read().lower().split("\n\n")
    cleaned_lines = list(map(replace_unwanted_characters, document[1:]))
    list_of_lines = map(lambda x: x.split(" "), cleaned_lines)
    flattened_list_of_lines = [val for sublist in list_of_lines for val in sublist]
    return [word for word in flattened_list_of_lines if word != '']
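if __name__ == '__main__':
    # Editorial usage sketch; 'sample.txt' is a hypothetical newsgroup-style
    # document whose first blank-line-separated block is a header.
    with open('sample.txt') as doc:
        print(clean_document(doc)[:10])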
| [((11, 28, 11, 60), 'os.scandir', 'os.scandir', ({(11, 39, 11, 59): 'folder_relative_path'}, {}), '(folder_relative_path)', False, 'import os\n')] |
carlboudreau007/ecosys | tools/ldbc_benchmark/neo4j/load_scripts/time_index.py | d415143837a85ceb6213a0f0588128a86a4a3984 | from datetime import datetime
with open('/home/neo4j/neo4j-community-3.5.1/logs/debug.log', 'r') as log:
begin = []
end = []
for line in log:
if 'Index population started' in line:
begin.append(line[:23])
elif 'Index creation finished' in line:
end.append(line[:23])
if len(begin) == 0 or len(begin) > 9:
print("Something went wrong. Please check debug.log")
elif len(begin) != len(end):
print("{}/{} Done. Please come back later.".format(len(end), len(begin)))
else:
elapsed_time = 0
    for i in range(len(begin)):
begin_tmp = datetime.strptime(begin[i], '%Y-%m-%d %H:%M:%S.%f')
end_tmp = datetime.strptime(end[i],'%Y-%m-%d %H:%M:%S.%f')
elapsed_time += (end_tmp-begin_tmp).total_seconds()
print("Done in {} s".format(elapsed_time))
| [((18, 18, 18, 69), 'datetime.datetime.strptime', 'datetime.strptime', ({(18, 36, 18, 44): 'begin[i]', (18, 46, 18, 68): '"""%Y-%m-%d %H:%M:%S.%f"""'}, {}), "(begin[i], '%Y-%m-%d %H:%M:%S.%f')", False, 'from datetime import datetime\n'), ((19, 16, 19, 64), 'datetime.datetime.strptime', 'datetime.strptime', ({(19, 34, 19, 40): 'end[i]', (19, 41, 19, 63): '"""%Y-%m-%d %H:%M:%S.%f"""'}, {}), "(end[i], '%Y-%m-%d %H:%M:%S.%f')", False, 'from datetime import datetime\n')] |
Ziki2001/new-school-sdk | zf-setup.py | b606e666888e1c9813e2f1a6a64bbede3744026e | # -*- coding: utf-8 -*-
'''
:file: setup.py
:author: -Farmer
:url: https://blog.farmer233.top
:date: 2021/09/20 11:11:54
'''
from os import path
from setuptools import setup, find_packages
basedir = path.abspath(path.dirname(__file__))
with open(path.join(basedir, "README.md"), encoding='utf-8') as f:
long_description = f.read()
setup(
name="zf-school-sdk",
author="farmer.chillax",
version="1.3.2",
license='MIT',
author_email="[email protected]",
description="zf School SDK for Python",
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/Farmer-chong/new-school-sdk',
packages=find_packages(),
# package_data={},
package_data={"school_sdk": ['check_code/model.pkl']},
include_package_data=True,
platforms='any',
zip_safe=False,
install_requires=[
'requests',
'pyquery',
'bs4',
'Pillow',
'fake-headers',
'torch',
'torchvision',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
# python zf-setup.py bdist_wheel sdist
# twine upload dist/* | [((12, 23, 12, 45), 'os.path.dirname', 'path.dirname', ({(12, 36, 12, 44): '__file__'}, {}), '(__file__)', False, 'from os import path\n'), ((14, 10, 14, 41), 'os.path.join', 'path.join', ({(14, 20, 14, 27): 'basedir', (14, 29, 14, 40): '"""README.md"""'}, {}), "(basedir, 'README.md')", False, 'from os import path\n'), ((28, 13, 28, 28), 'setuptools.find_packages', 'find_packages', ({}, {}), '()', False, 'from setuptools import setup, find_packages\n')] |
antx-code/funcode | RunIt/airt/poker_cards.py | a8a9b99274e169562771b488a3a9551277ef4b99 | # Square 方片 => sq => RGB蓝色(Blue)
# Plum 梅花 => pl => RGB绿色(Green)
# Spade 黑桃 => sp => RGB黑色(Black)
# Heart 红桃 => he => RGB红色(Red)
init_poker = {
'local': {
'head': [-1, -1, -1],
'mid': [-1, -1, -1, -1, -1],
'tail': [-1, -1, -1, -1, -1],
'drop': [-1, -1, -1, -1],
'hand': [-1, -1, -1]
},
'player1': {
'head': [-1, -1, -1],
'mid': [-1, -1, -1, -1, -1],
'tail': [-1, -1, -1, -1, -1],
'drop': [-1, -1, -1, -1],
'hand': [-1, -1, -1]
},
'player2': {
'head': [-1, -1, -1],
'mid': [-1, -1, -1, -1, -1],
'tail': [-1, -1, -1, -1, -1],
'drop': [-1, -1, -1, -1],
'hand': [-1, -1, -1]
}
}
# Square
Blue = {
'2': 0,
'3': 1,
'4': 2,
'5': 3,
'6': 4,
'7': 5,
'8': 6,
'9': 7,
'10': 8,
'J': 9,
'Q': 10,
'K': 11,
'A': 12
}
# Plum
Green = {
'2': 13,
'3': 14,
'4': 15,
'5': 16,
'6': 17,
'7': 18,
'8': 19,
'9': 20,
'10': 21,
'J': 22,
'Q': 23,
'K': 24,
'A': 25
}
# Heart
Red = {
'2': 26,
'3': 27,
'4': 28,
'5': 29,
'6': 30,
'7': 31,
'8': 32,
'9': 33,
'10': 34,
'J': 35,
'Q': 36,
'K': 37,
'A': 38
}
# Spade
Black = {
'2': 39,
'3': 40,
'4': 41,
'5': 42,
'6': 43,
'7': 44,
'8': 45,
'9': 46,
'10': 47,
'J': 48,
'Q': 49,
'K': 50,
'A': 51
}
POKER_SCOPE = [
'2',
'3',
'4',
'5',
'6',
'7',
'8',
'9',
'10',
'J',
'Q',
'K',
'A'
]
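# Minimal helper sketch (not part of the original module): map a 0-51 card
# index back to its color/suit block and rank, using the encoding above
# (suit block = index // 13, rank = POKER_SCOPE[index % 13]).
def card_to_name(index):
    suits = ['Blue(Square)', 'Green(Plum)', 'Red(Heart)', 'Black(Spade)']
    return suits[index // 13], POKER_SCOPE[index % 13]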
| [] |
reflective21/iportfolio | main.py | 39db626a9754c1df44ac698f3d8988fdc4e7c6d5 | name = "David Asiru Adetomiwa"
print(name) | [] |
kdeltared/tcex | tcex/services/api_service.py | 818c0d09256764f871e42d9ca5916f92d941d882 | """TcEx Framework API Service module."""
# standard library
import json
import sys
import threading
import traceback
from io import BytesIO
from typing import Any
from .common_service import CommonService
class ApiService(CommonService):
"""TcEx Framework API Service module."""
def __init__(self, tcex: object):
"""Initialize the Class properties.
Args:
tcex: Instance of TcEx.
"""
super().__init__(tcex)
# properties
self._metrics = {'Errors': 0, 'Requests': 0, 'Responses': 0}
# config callbacks
self.api_event_callback = None
@property
def command_map(self) -> dict:
"""Return the command map for the current Service type."""
command_map = super().command_map
command_map.update({'runservice': self.process_run_service_command})
return command_map
def format_query_string(self, params: dict) -> str:
"""Convert name/value array to a query string.
Args:
params: The query params for the request.
Returns:
str: The query params reformatted as a string.
"""
query_string = []
try:
for q in params:
query_string.append(f'''{q.get('name')}={q.get('value')}''')
except AttributeError as e:
self.log.error(
f'feature=api-service, event=bad-params-provided, params={params}, error="""{e})"""'
)
self.log.trace(traceback.format_exc())
return '&'.join(query_string)
def format_request_headers(self, headers: dict) -> dict:
"""Convert name/value array to a headers dict.
Args:
headers: The dict of key/value header data.
Returns:
dict: The restructured header data.
"""
headers_ = {}
try:
for h in headers:
# TODO: either support tuple or csv list of values
# headers_.setdefault(h.get('name').lower(), []).append(h.get('value'))
headers_.setdefault(h.get('name').lower(), str(h.get('value')))
except AttributeError as e:
self.log.error(
f'feature=api-service, event=bad-headers-provided, '
f'headers={headers}, error="""{e})"""'
)
self.log.trace(traceback.format_exc())
return headers_
    def format_response_headers(self, headers: dict) -> list:
        """Convert response header tuples into a list of name/value dicts.
        Args:
            headers: The header data (name/value tuples) to be converted.
        Returns:
            list: The restructured header data.
        """
headers_ = []
try:
for h in headers:
headers_.append({'name': h[0], 'value': h[1]})
except AttributeError as e:
self.log.error(
f'feature=api-service, event=bad-headers-provided, '
f'headers={headers}, error="""{e})"""'
)
self.log.trace(traceback.format_exc())
return headers_
def process_run_service_response(self, *args, **kwargs) -> None:
"""Handle service event responses.
('200 OK', [('content-type', 'application/json'), ('content-length', '103')])
"""
self.log.info('feature=api-service, event=response-received, status=waiting-for-body')
kwargs.get('event').wait(30) # wait for thread event - (set on body write)
self.log.trace(f'feature=api-service, event=response, args={args}')
try:
status_code, status = args[0].split(' ', 1)
response = {
'bodyVariable': 'response.body',
'command': 'Acknowledged',
'headers': self.format_response_headers(args[1]),
'requestKey': kwargs.get('request_key'), # pylint: disable=cell-var-from-loop
'status': status,
'statusCode': status_code,
'type': 'RunService',
}
self.log.info('feature=api-service, event=response-sent')
self.message_broker.publish(json.dumps(response), self.args.tc_svc_client_topic)
self.increment_metric('Responses')
except Exception as e:
self.log.error(
f'feature=api-service, event=failed-creating-response-body, error="""{e}"""'
)
self.log.trace(traceback.format_exc())
self.increment_metric('Errors')
def process_run_service_command(self, message: dict) -> None:
"""Process the RunService command.
.. code-block:: python
:linenos:
:lineno-start: 1
{
"command": "RunService",
"apiToken": "abc123",
"bodyVariable": "request.body",
"headers": [ { key/value pairs } ],
"method": "GET",
"queryParams": [ { key/value pairs } ],
"requestKey": "123abc",
"userConfig": [{
"name": "tlpExportSetting",
"value": "TLP:RED"
}],
}
Args:
message: The message payload from the server topic.
"""
# register config apiToken (before any logging)
self.token.register_token(
self.thread_name, message.get('apiToken'), message.get('expireSeconds')
)
self.log.info(f'feature=api-service, event=runservice-command, message="{message}"')
# thread event used to block response until body is written
event = threading.Event()
# process message
request_key: str = message.get('requestKey')
body = None
try:
# read body from redis
body_variable: str = message.pop('bodyVariable', None)
if body_variable is not None:
body: Any = self.key_value_store.read(request_key, body_variable)
if body is not None:
# for API service the data in Redis is not b64 encoded
body = BytesIO(body)
except Exception as e:
self.log.error(f'feature=api-service, event=failed-reading-body, error="""{e}"""')
self.log.trace(traceback.format_exc())
headers: dict = self.format_request_headers(message.pop('headers'))
method: str = message.pop('method')
params: dict = message.pop('queryParams')
path: str = message.pop('path')
try:
environ = {
'wsgi.errors': sys.stderr,
'wsgi.input': body,
'wsgi.multithread': True,
'wsgi.multiprocess': False,
'wsgi.run_once': True,
'wsgi.url_scheme': 'https',
'wsgi.version': (1, 0),
'PATH_INFO': path,
'QUERY_STRING': self.format_query_string(params),
'REMOTE_ADDR': message.get('remoteAddress', ''),
# 'REMOTE_HOST': message.get('remoteAddress', ''),
'REQUEST_METHOD': method.upper(),
'SCRIPT_NAME': '/',
'SERVER_NAME': '',
'SERVER_PORT': '',
'SERVER_PROTOCOL': 'HTTP/1.1',
}
# Add user config for TAXII or other service that supports the data type
environ['user_config'] = message.get('userConfig', [])
# add headers
if headers.get('content-type') is not None:
environ['CONTENT_TYPE'] = headers.pop('content-type')
# add content length
if headers.get('content-length') is not None:
environ['CONTENT_LENGTH'] = headers.pop('content-length')
for header, value in headers.items():
environ[f'HTTP_{header}'.upper()] = value
# make values from message available in env in camel
# case (e.g., falcon -> req.env.get('request_url))
for key, value in message.items():
if key not in environ and self.tcex.utils.camel_to_snake(key) not in environ:
environ[self.tcex.utils.camel_to_snake(key)] = value
self.log.trace(f'feature=api-service, environ={environ}')
self.increment_metric('Requests')
except Exception as e:
self.log.error(f'feature=api-service, event=failed-building-environ, error="""{e}"""')
self.log.trace(traceback.format_exc())
self.increment_metric('Errors')
return # stop processing
def response_handler(*args, **kwargs): # pylint: disable=unused-argument
"""Handle WSGI Response"""
kwargs['event'] = event # add event to kwargs for blocking
kwargs['request_key'] = request_key
self.service_thread(
name='response-handler',
target=self.process_run_service_response,
args=args,
kwargs=kwargs,
)
if callable(self.api_event_callback):
try:
body_data: Any = self.api_event_callback( # pylint: disable=not-callable
environ, response_handler
)
# process body
body = ''
if hasattr(body_data, 'read'):
body = body_data.read()
elif isinstance(body_data, list):
for bd in body_data:
if hasattr(bd, 'read'):
body += bd.read()
elif isinstance(bd, bytes):
body += bd.decode()
elif isinstance(bd, list):
for b in bd:
self.log.error(f'unhandled type - {type(b)}')
else:
self.log.error(f'unhandled type - {type(body)}')
self.log.error(f'unhandled type dir - {dir(body)}')
# write body to Redis
self.key_value_store.create(request_key, 'response.body', body)
# set thread event to True to trigger response
self.log.info('feature=api-service, event=response-body-written')
event.set()
except Exception as e:
self.log.error(
f'feature=api-service, event=api-event-callback-failed, error="""{e}""".'
)
self.log.trace(traceback.format_exc())
self.increment_metric('Errors')
# unregister config apiToken
self.token.unregister_token(self.thread_name)
| [((162, 16, 162, 33), 'threading.Event', 'threading.Event', ({}, {}), '()', False, 'import threading\n'), ((122, 40, 122, 60), 'json.dumps', 'json.dumps', ({(122, 51, 122, 59): 'response'}, {}), '(response)', False, 'import json\n'), ((54, 27, 54, 49), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((78, 27, 78, 49), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((99, 27, 99, 49), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((128, 27, 128, 49), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((174, 27, 174, 40), 'io.BytesIO', 'BytesIO', ({(174, 35, 174, 39): 'body'}, {}), '(body)', False, 'from io import BytesIO\n'), ((177, 27, 177, 49), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((227, 27, 227, 49), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((275, 31, 275, 53), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n')] |
vsatyakumar/mmpose | mmpose/core/optimizer/builder.py | 2fffccb19dad3b59184b41be94653f75523b8585 | from mmcv.runner import build_optimizer
def build_optimizers(model, cfgs):
"""Build multiple optimizers from configs.
If `cfgs` contains several dicts for optimizers, then a dict for each
constructed optimizers will be returned.
If `cfgs` only contains one optimizer config, the constructed optimizer
itself will be returned.
For example,
1) Multiple optimizer configs:
.. code-block:: python
optimizer_cfg = dict(
model1=dict(type='SGD', lr=lr),
model2=dict(type='SGD', lr=lr))
The return dict is
``dict('model1': torch.optim.Optimizer, 'model2': torch.optim.Optimizer)``
2) Single optimizer config:
.. code-block:: python
optimizer_cfg = dict(type='SGD', lr=lr)
The return is ``torch.optim.Optimizer``.
Args:
model (:obj:`nn.Module`): The model with parameters to be optimized.
cfgs (dict): The config dict of the optimizer.
Returns:
dict[:obj:`torch.optim.Optimizer`] | :obj:`torch.optim.Optimizer`:
The initialized optimizers.
"""
optimizers = {}
if hasattr(model, 'module'):
model = model.module
# determine whether 'cfgs' has several dicts for optimizers
if all(isinstance(v, dict) for v in cfgs.values()):
for key, cfg in cfgs.items():
cfg_ = cfg.copy()
module = getattr(model, key)
optimizers[key] = build_optimizer(module, cfg_)
return optimizers
else:
return build_optimizer(model, cfgs)
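# Minimal usage sketch (illustrative; mirrors the docstring example above and
# assumes a model with child modules named 'model1' and 'model2'):
#
#   optim_cfg = dict(model1=dict(type='SGD', lr=0.01),
#                    model2=dict(type='SGD', lr=0.02))
#   optimizers = build_optimizers(gan_model, optim_cfg)  # -> dict of optimizers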
| [((52, 15, 52, 43), 'mmcv.runner.build_optimizer', 'build_optimizer', ({(52, 31, 52, 36): 'model', (52, 38, 52, 42): 'cfgs'}, {}), '(model, cfgs)', False, 'from mmcv.runner import build_optimizer\n'), ((49, 30, 49, 59), 'mmcv.runner.build_optimizer', 'build_optimizer', ({(49, 46, 49, 52): 'module', (49, 54, 49, 58): 'cfg_'}, {}), '(module, cfg_)', False, 'from mmcv.runner import build_optimizer\n')] |
angel-vazquez25/My-Backlog-Handler | register/views.py | 60880cfc6bcc5a7fb2d5c752c11bdfe741f76531 | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.contrib import messages  # used by the optional messages.info() call below
from .forms import RegisterForm
# Create your views here.
def register(response):  # note: 'response' here is the incoming HttpRequest
if response.user.is_authenticated:
return redirect("homepage")
else:
if response.method == "POST":
form = RegisterForm(response.POST)
if form.is_valid():
new_user = form.save()
# messages.info(response, "Thanks for registering. You are now logged in.")
new_user = authenticate(username=form.cleaned_data['username'],
password=form.cleaned_data['password1'],
)
login(response, new_user)
return redirect("homepage")
else:
form = RegisterForm()
return render(response, "register/register.html", {"form": form})
| [((16, 15, 16, 35), 'django.shortcuts.redirect', 'redirect', ({(16, 24, 16, 34): '"""homepage"""'}, {}), "('homepage')", False, 'from django.shortcuts import render, redirect\n'), ((32, 15, 32, 73), 'django.shortcuts.render', 'render', ({(32, 22, 32, 30): 'response', (32, 32, 32, 56): '"""register/register.html"""', (32, 58, 32, 72): "{'form': form}"}, {}), "(response, 'register/register.html', {'form': form})", False, 'from django.shortcuts import render, redirect\n'), ((28, 19, 28, 39), 'django.shortcuts.redirect', 'redirect', ({(28, 28, 28, 38): '"""homepage"""'}, {}), "('homepage')", False, 'from django.shortcuts import render, redirect\n'), ((24, 27, 26, 41), 'django.contrib.auth.authenticate', 'authenticate', (), '', False, 'from django.contrib.auth import authenticate, login\n'), ((27, 16, 27, 41), 'django.contrib.auth.login', 'login', ({(27, 22, 27, 30): 'response', (27, 32, 27, 40): 'new_user'}, {}), '(response, new_user)', False, 'from django.contrib.auth import authenticate, login\n')] |
Aerodlyn/mu | forum/migrations/0001_initial.py | 2c3b95e5a83d0f651dd8ad287b471803e1fec3a1 | # Generated by Django 3.1.7 on 2021-03-26 01:27
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Community',
fields=[
('name', models.CharField(max_length=64, primary_key=True, serialize=False)),
('description', models.TextField()),
('private', models.BooleanField(default=False)),
('slug', models.SlugField()),
],
),
]
| [((17, 25, 17, 91), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((18, 32, 18, 50), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((19, 28, 19, 62), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((20, 25, 20, 43), 'django.db.models.SlugField', 'models.SlugField', ({}, {}), '()', False, 'from django.db import migrations, models\n')] |
shirley-wu/text_to_table | custom_train.py | 44cb100b8ff2543b5b4efe1461502c00c34ef846 | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import collections
import logging
import math
import os
import sys
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq import meters
from fairseq.checkpoint_utils import checkpoint_paths
from fairseq.data import iterators
from fairseq.file_io import PathManager
from fairseq.logging import metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
class Saver:
def __init__(self):
self.best = None
self.keep_best = []
def save_checkpoint(self, args, trainer, epoch_itr, val_loss):
# only one worker should attempt to create the required dir
if args.distributed_rank == 0:
os.makedirs(args.save_dir, exist_ok=True)
prev_best = val_loss if self.best is None else self.best
if val_loss is not None:
best_function = max if args.maximize_best_checkpoint_metric else min
self.best = best_function(val_loss, prev_best)
if args.no_save:
return
trainer.consolidate_optimizer()
if not trainer.is_data_parallel_master:
return
def is_better(a, b):
return a >= b if args.maximize_best_checkpoint_metric else a <= b
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
suffix = getattr(args, "checkpoint_suffix", "")
checkpoint_conds = collections.OrderedDict()
save_epoch_checkpoint = (
end_of_epoch
and not args.no_epoch_checkpoints
and epoch % args.save_interval == 0
)
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = save_epoch_checkpoint
checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
not save_epoch_checkpoint
and args.save_interval_updates > 0
and updates % args.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
self.best is None
or is_better(val_loss, self.best)
)
checkpoint_conds[
"checkpoint_last{}.pt".format(suffix)
] = not args.no_last_checkpoints
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if self.best is not None:
extra_state.update({"best": self.best})
if args.keep_best_checkpoints > 0 and (len(self.keep_best) < args.keep_best_checkpoints or (
val_loss is not None and not is_better(self.keep_best[-1][0], val_loss))):
ckpt_name = "checkpoint{}{}.best_{:.4f}.pt".format(epoch, suffix, val_loss) if save_epoch_checkpoint \
else "checkpoint_{}_{}{}.best_{:.4f}.pt".format(epoch, updates, suffix, val_loss)
checkpoint_conds[ckpt_name] = True
self.keep_best.append((val_loss, ckpt_name))
self.keep_best = sorted(self.keep_best)
checkpoints = [
os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0:
trainer.save_checkpoint(checkpoints[0], extra_state)
for cp in checkpoints[1:]:
PathManager.copy(checkpoints[0], cp, overwrite=True)
write_timer.stop()
logger.info(
"saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
checkpoints[0], epoch, updates, val_loss, write_timer.sum
)
)
if not end_of_epoch and args.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(
args.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt"
)
for old_chk in checkpoints[args.keep_interval_updates:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if args.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(args.save_dir, pattern=r"checkpoint(\d+)\.pt")
for old_chk in checkpoints[args.keep_last_epochs:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if len(self.keep_best) > args.keep_best_checkpoints:
for _, x in self.keep_best[args.keep_best_checkpoints:]:
x = os.path.join(args.save_dir, x)
if os.path.lexists(x):
os.remove(x)
self.keep_best = self.keep_best[:args.keep_best_checkpoints]
def main(args):
saver = Saver()
utils.import_user_module(args)
assert (
args.max_tokens is not None or args.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
logger.info(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
logger.info(model)
logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if args.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=args.quantization_config_path,
max_epoch=args.max_epoch,
max_update=args.max_update,
)
else:
quantizer = None
# Build trainer
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(args, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
)
logger.info(
"max tokens per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.batch_size
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
args,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr, saver)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr, saver):
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_losses = [None]
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, saver
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, saver):
num_updates = trainer.get_num_updates()
max_update = args.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
or num_updates >= max_update
or (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
or num_updates >= max_update
or (
args.validate_interval_updates > 0
and num_updates > 0
and num_updates % args.validate_interval_updates == 0
)
) and not args.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets, saver)
# Stopping conditions
should_stop = (
should_stop_early(args, valid_losses[0])
or num_updates >= max_update
or (
args.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
saver.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses, should_stop
def get_training_stats(stats):
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets, saver):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values(), saver)
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args.best_checkpoint_metric])
return valid_losses
def get_valid_stats(args, trainer, stats, saver):
stats["num_updates"] = trainer.get_num_updates()
if hasattr(saver.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
saver.save_checkpoint.best, stats[args.best_checkpoint_metric]
)
return stats
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
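# Typical invocation (illustrative; exact flags depend on the task and data):
#   python custom_train.py data-bin/my_dataset --arch transformer \
#       --max-tokens 4096 --save-dir checkpoints --keep-best-checkpoints 5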
| [((40, 9, 40, 47), 'logging.getLogger', 'logging.getLogger', ({(40, 27, 40, 46): '"""fairseq_cli.train"""'}, {}), "('fairseq_cli.train')", False, 'import logging\n'), ((278, 1, 278, 27), 'fairseq.logging.metrics.aggregate', 'metrics.aggregate', ({(278, 19, 278, 26): '"""train"""'}, {}), "('train')", False, 'from fairseq.logging import metrics, progress_bar\n'), ((150, 4, 150, 34), 'fairseq.utils.import_user_module', 'utils.import_user_module', ({(150, 29, 150, 33): 'args'}, {}), '(args)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((156, 4, 156, 19), 'fairseq.logging.metrics.reset', 'metrics.reset', ({}, {}), '()', False, 'from fairseq.logging import metrics, progress_bar\n'), ((158, 4, 158, 29), 'numpy.random.seed', 'np.random.seed', ({(158, 19, 158, 28): 'args.seed'}, {}), '(args.seed)', True, 'import numpy as np\n'), ((159, 4, 159, 35), 'fairseq.utils.set_torch_seed', 'utils.set_torch_seed', ({(159, 25, 159, 34): 'args.seed'}, {}), '(args.seed)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((161, 7, 161, 40), 'fairseq.distributed_utils.is_master', 'distributed_utils.is_master', ({(161, 35, 161, 39): 'args'}, {}), '(args)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((168, 11, 168, 33), 'fairseq.tasks.setup_task', 'tasks.setup_task', ({(168, 28, 168, 32): 'args'}, {}), '(args)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((227, 18, 227, 41), 'fairseq.meters.StopwatchMeter', 'meters.StopwatchMeter', ({}, {}), '()', False, 'from fairseq import meters\n'), ((291, 10, 291, 53), 'fairseq.data.iterators.GroupedIterator', 'iterators.GroupedIterator', ({(291, 36, 291, 39): 'itr', (291, 41, 291, 52): 'update_freq'}, {}), '(itr, update_freq)', False, 'from fairseq.data import iterators\n'), ((342, 4, 342, 33), 'fairseq.logging.metrics.reset_meters', 'metrics.reset_meters', ({(342, 25, 342, 32): '"""train"""'}, {}), "('train')", False, 'from fairseq.logging import metrics, progress_bar\n'), ((452, 13, 452, 42), 'fairseq.options.get_training_parser', 'options.get_training_parser', ({}, {}), '()', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((453, 11, 453, 75), 'fairseq.options.parse_args_and_arch', 'options.parse_args_and_arch', (), '', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((69, 22, 69, 45), 'fairseq.meters.StopwatchMeter', 'meters.StopwatchMeter', ({}, {}), '()', False, 'from fairseq import meters\n'), ((77, 27, 77, 52), 'collections.OrderedDict', 'collections.OrderedDict', ({}, {}), '()', False, 'import collections\n'), ((162, 8, 162, 67), 'fairseq.checkpoint_utils.verify_checkpoint_directory', 'checkpoint_utils.verify_checkpoint_directory', ({(162, 53, 162, 66): 'args.save_dir'}, {}), '(args.save_dir)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((192, 20, 196, 9), 'fairseq.quantization_utils.Quantizer', 'quantization_utils.Quantizer', (), '', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((202, 18, 202, 66), 'fairseq.trainer.Trainer', 'Trainer', ({(202, 26, 202, 30): 'args', (202, 32, 202, 36): 'task', (202, 38, 202, 43): 'model', (202, 45, 202, 54): 
'criterion', (202, 56, 202, 65): 'quantizer'}, {}), '(args, task, model, criterion, quantizer)', False, 'from fairseq.trainer import Trainer\n'), ((204, 18, 204, 63), 'fairseq.model_parallel.megatron_trainer.MegatronTrainer', 'MegatronTrainer', ({(204, 34, 204, 38): 'args', (204, 40, 204, 44): 'task', (204, 46, 204, 51): 'model', (204, 53, 204, 62): 'criterion'}, {}), '(args, task, model, criterion)', False, 'from fairseq.model_parallel.megatron_trainer import MegatronTrainer\n'), ((293, 14, 293, 40), 'fairseq.utils.tpu_data_loader', 'utils.tpu_data_loader', ({(293, 36, 293, 39): 'itr'}, {}), '(itr)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((338, 31, 338, 67), 'fairseq.logging.metrics.get_smoothed_values', 'metrics.get_smoothed_values', ({(338, 59, 338, 66): '"""train"""'}, {}), "('train')", False, 'from fairseq.logging import metrics, progress_bar\n'), ((403, 8, 403, 56), 'fairseq.utils.set_torch_seed', 'utils.set_torch_seed', ({(403, 29, 403, 55): 'args.fixed_validation_seed'}, {}), '(args.fixed_validation_seed)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((459, 8, 459, 47), 'fairseq.distributed_utils.call_main', 'distributed_utils.call_main', ({(459, 36, 459, 40): 'args', (459, 42, 459, 46): 'main'}, {}), '(args, main)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((51, 12, 51, 53), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((110, 12, 110, 43), 'os.path.join', 'os.path.join', ({(110, 25, 110, 38): 'args.save_dir', (110, 40, 110, 42): 'fn'}, {}), '(args.save_dir, fn)', False, 'import os\n'), ((126, 26, 128, 13), 'fairseq.checkpoint_utils.checkpoint_paths', 'checkpoint_paths', (), '', False, 'from fairseq.checkpoint_utils import checkpoint_paths\n'), ((135, 26, 135, 89), 'fairseq.checkpoint_utils.checkpoint_paths', 'checkpoint_paths', (), '', False, 'from fairseq.checkpoint_utils import checkpoint_paths\n'), ((312, 13, 312, 45), 'fairseq.logging.metrics.aggregate', 'metrics.aggregate', ({(312, 31, 312, 44): '"""train_inner"""'}, {}), "('train_inner')", False, 'from fairseq.logging import metrics, progress_bar\n'), ((312, 47, 314, 9), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', ({(313, 16, 313, 35): "('train_step-%d' % i)"}, {}), "('train_step-%d' % i)", False, 'import torch\n'), ((394, 26, 394, 62), 'fairseq.logging.metrics.get_meter', 'metrics.get_meter', ({(394, 44, 394, 53): '"""default"""', (394, 55, 394, 61): '"""wall"""'}, {}), "('default', 'wall')", False, 'from fairseq.logging import metrics, progress_bar\n'), ((413, 18, 413, 44), 'fairseq.utils.tpu_data_loader', 'utils.tpu_data_loader', ({(413, 40, 413, 43): 'itr'}, {}), '(itr)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((428, 13, 428, 45), 'fairseq.logging.metrics.aggregate', 'metrics.aggregate', (), '', False, 'from fairseq.logging import metrics, progress_bar\n'), ((455, 13, 455, 42), 'torch.cuda.profiler.profile', 'torch.cuda.profiler.profile', ({}, {}), '()', False, 'import torch\n'), ((37, 10, 37, 44), 'os.environ.get', 'os.environ.get', ({(37, 25, 37, 35): '"""LOGLEVEL"""', (37, 37, 37, 43): '"""INFO"""'}, {}), "('LOGLEVEL', 'INFO')", False, 'import os\n'), ((115, 16, 115, 68), 'fairseq.file_io.PathManager.copy', 'PathManager.copy', (), '', False, 'from fairseq.file_io import 
PathManager\n'), ((130, 19, 130, 43), 'os.path.lexists', 'os.path.lexists', ({(130, 35, 130, 42): 'old_chk'}, {}), '(old_chk)', False, 'import os\n'), ((137, 19, 137, 43), 'os.path.lexists', 'os.path.lexists', ({(137, 35, 137, 42): 'old_chk'}, {}), '(old_chk)', False, 'import os\n'), ((142, 20, 142, 50), 'os.path.join', 'os.path.join', ({(142, 33, 142, 46): 'args.save_dir', (142, 48, 142, 49): 'x'}, {}), '(args.save_dir, x)', False, 'import os\n'), ((143, 19, 143, 37), 'os.path.lexists', 'os.path.lexists', ({(143, 35, 143, 36): 'x'}, {}), '(x)', False, 'import os\n'), ((300, 39, 300, 72), 'fairseq.distributed_utils.is_master', 'distributed_utils.is_master', ({(300, 67, 300, 71): 'args'}, {}), '(args)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((326, 16, 326, 51), 'fairseq.logging.metrics.reset_meters', 'metrics.reset_meters', ({(326, 37, 326, 50): '"""train_inner"""'}, {}), "('train_inner')", False, 'from fairseq.logging import metrics, progress_bar\n'), ((456, 17, 456, 52), 'torch.autograd.profiler.emit_nvtx', 'torch.autograd.profiler.emit_nvtx', ({}, {}), '()', False, 'import torch\n'), ((457, 16, 457, 55), 'fairseq.distributed_utils.call_main', 'distributed_utils.call_main', ({(457, 44, 457, 48): 'args', (457, 50, 457, 54): 'main'}, {}), '(args, main)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n'), ((131, 20, 131, 38), 'os.remove', 'os.remove', ({(131, 30, 131, 37): 'old_chk'}, {}), '(old_chk)', False, 'import os\n'), ((138, 20, 138, 38), 'os.remove', 'os.remove', ({(138, 30, 138, 37): 'old_chk'}, {}), '(old_chk)', False, 'import os\n'), ((144, 20, 144, 32), 'os.remove', 'os.remove', ({(144, 30, 144, 31): 'x'}, {}), '(x)', False, 'import os\n'), ((321, 43, 321, 85), 'fairseq.logging.metrics.get_smoothed_values', 'metrics.get_smoothed_values', ({(321, 71, 321, 84): '"""train_inner"""'}, {}), "('train_inner')", False, 'from fairseq.logging import metrics, progress_bar\n'), ((421, 43, 421, 76), 'fairseq.distributed_utils.is_master', 'distributed_utils.is_master', ({(421, 71, 421, 75): 'args'}, {}), '(args)', False, 'from fairseq import checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils\n')] |
JessicaWiedemeier/IDV | src/ucar/unidata/idv/resources/python/griddiag.py | e5f67c755cc95f8ad2123bdc45a91f0e5eca0d64 | """
This is the doc for the Grid Diagnostics module. These functions
are based on the grid diagnostics from the GEneral Meteorological
PAcKage (GEMPAK). Note that the names are case sensitive and some
are named slightly different from GEMPAK functions to avoid conflicts
with Jython built-ins (e.g. str).
<P>
In the following operators, scalar operands are named S<sub>n</sub> and
vector operands are named V<sub>n</sub>. Lowercase u and v refer to the
grid relative components of a vector.
"""
def GRAVITY():
""" Gravity constant """
return DerivedGridFactory.GRAVITY;
# Math functions
def atn2(S1,S2,WA=0):
""" Wrapper for atan2 built-in
<div class=jython>
ATN2 (S1, S2) = ATAN ( S1 / S2 )<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.atan2(S1,S2,WA)
def add(S1,S2,WA=0):
""" Addition
<div class=jython>
ADD (S1, S2) = S1 + S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.add(S1,S2,WA)
def mul(S1,S2,WA=0):
""" Multiply
<div class=jython>
MUL (S1, S2) = S1 * S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.multiply(S1,S2,WA)
def quo(S1,S2,WA=0):
""" Divide
<div class=jython>
QUO (S1, S2) = S1 / S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.divide(S1,S2,WA)
def sub(S1,S2,WA=0):
""" Subtract
<div class=jython>
SUB (S1, S2) = S1 - S2<br>
WA = use WEIGHTED_AVERAGE (default NEAREST_NEIGHBOR)
</div>
"""
return GridMath.subtract(S1,S2,WA)
# Scalar quantities
def adv(S,V):
""" Horizontal Advection, negative by convention
<div class=jython>
ADV ( S, V ) = - ( u * DDX (S) + v * DDY (S) )
</div>
"""
return -add(mul(ur(V),ddx(S)),mul(vr(V),ddy(S)))
def avg(S1,S2):
""" Average of 2 scalars
<div class=jython>
AVG (S1, S2) = ( S1 + S2 ) / 2
</div>
"""
return add(S1,S2)/2
def avor(V):
""" Absolute Vorticity
<div class=jython>
AVOR ( V ) = VOR ( V ) + CORL(V)
</div>
"""
relv = vor(V)
return add(relv,corl(relv))
def circs(S, D=2):
"""
<div class=jython>
    Apply a circular aperture smoothing to the grid points. The weighting
    function is the circular aperture diffraction function. D is
the radius of influence in grid increments, increasing D increases
the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CIRC", int(D))
def corl(S):
""" Coriolis Parameter for all points in a grid
<div class=jython>
CORL = TWO_OMEGA*sin(latr)
</div>
"""
return DerivedGridFactory.createCoriolisGrid(S)
def cress(S, D=2):
"""
<div class=jython>
Apply a Cressman smoothing to the grid points. The smoothed value
is given by a weighted average of surrounding grid points. D is
the radius of influence in grid increments,
increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CRES", int(D))
def cros(V1,V2):
""" Vector cross product magnitude
<div class=jython>
CROS ( V1, V2 ) = u1 * v2 - u2 * v1
</div>
"""
return sub(mul(ur(V1),vr(V2)),mul(ur(V2),vr(V1)))
def ddx(S):
""" Take the derivative with respect to the domain's X coordinate
"""
return GridMath.ddx(S);
def ddy(S):
""" Take the derivative with respect to the domain's Y coordinate
"""
return GridMath.ddy(S);
def defr(V):
""" Total deformation
<div class=jython>
DEF ( V ) = ( STRD (V) ** 2 + SHR (V) ** 2 ) ** .5
</div>
"""
return mag(strd(V),shr(V))
def div(V):
""" Horizontal Divergence
<div class=jython>
DIV ( V ) = DDX ( u ) + DDY ( v )
</div>
"""
return add(ddx(ur(V)),ddy(vr(V)))
def dirn(V):
""" North relative direction of a vector
<div class=jython>
DIRN ( V ) = DIRR ( un(v), vn(v) )
</div>
"""
return dirr(DerivedGridFactory.createTrueFlowVector(V))
def dirr(V):
""" Grid relative direction of a vector
"""
return DerivedGridFactory.createVectorDirection(V)
def dot(V1,V2):
""" Vector dot product
<div class=jython>
DOT ( V1, V2 ) = u1 * u2 + v1 * v2
</div>
"""
product = mul(V1,V2)
return add(ur(product),vr(product))
def gwfs(S, N=6):
"""
<div class=jython>
Horizontal smoothing using normally distributed weights
with theoretical response of 1/e for N * delta-x wave.
Increasing N increases the smoothing. (default N=6)
</div>
"""
return GridUtil.smooth(S, "GWFS", int(N))
def jcbn(S1,S2):
""" Jacobian Determinant
<div class=jython>
JCBN ( S1, S2 ) = DDX (S1) * DDY (S2) - DDY (S1) * DDX (S2)
</div>
"""
return sub(mul(ddx(S1),ddy(S2)),mul(ddy(S1),ddx(S2)))
def latr(S):
""" Latitudue all points in a grid
"""
return DerivedGridFactory.createLatitudeGrid(S)
def lap(S):
""" Laplacian operator
<div class=jython>
LAP ( S ) = DIV ( GRAD (S) )
</div>
"""
grads = grad(S)
return div(grads)
def lav(S,level1=None,level2=None, unit=None):
""" Layer Average of a multi layer grid
<div class=jython>
LAV ( S ) = ( S (level1) + S (level2) ) / 2.
</div>
"""
if level1 == None:
return GridMath.applyFunctionOverLevels(S, GridMath.FUNC_AVERAGE)
else:
return layerAverage(S,level1,level2, unit)
def ldf(S,level1,level2, unit=None):
""" Layer Difference
<div class=jython>
LDF ( S ) = S (level1) - S (level2)
</div>
"""
return layerDiff(S,level1,level2, unit);
def mag(*a):
""" Magnitude of a vector
"""
if (len(a) == 1):
return DerivedGridFactory.createVectorMagnitude(a[0]);
else:
return DerivedGridFactory.createVectorMagnitude(a[0],a[1]);
def mixr(temp,rh):
""" Mixing Ratio from Temperature, RH (requires pressure domain)
"""
return DerivedGridFactory.createMixingRatio(temp,rh)
def relh(temp,mixr):
""" Create Relative Humidity from Temperature, mixing ratio (requires pressure domain)
"""
return DerivedGridFactory.createRelativeHumidity(temp,mixr)
def pvor(S,V):
""" Potetial Vorticity (usually from theta and wind)
"""
return DerivedGridFactory.createPotentialVorticity(S,V)
def rects(S, D=2):
"""
<div class=jython>
    Apply a rectangular aperture smoothing to the grid points. The weighting
    function is the product of the rectangular aperture diffraction function
in the x and y directions. D is the radius of influence in grid
increments, increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "RECT", int(D))
def savg(S):
""" Average over whole grid
<div class=jython>
SAVG ( S ) = average of all non-missing grid point values
</div>
"""
return GridMath.applyFunctionToLevels(S, GridMath.FUNC_AVERAGE)
def savs(S):
""" Average over grid subset
<div class=jython>
SAVS ( S ) = average of all non-missing grid point values in the subset
area
</div>
"""
return savg(S)
def sdiv(S,V):
""" Horizontal Flux Divergence
<div class=jython>
SDIV ( S, V ) = S * DIV ( V ) + DOT ( V, GRAD ( S ) )
</div>
"""
return add(mul(S,(div(V))) , dot(V,grad(S)))
def shr(V):
""" Shear Deformation
<div class=jython>
SHR ( V ) = DDX ( v ) + DDY ( u )
</div>
"""
return add(ddx(vr(V)),ddy(ur(V)))
def sm5s(S):
""" Smooth a scalar grid using a 5-point smoother
<div class=jython>
SM5S ( S ) = .5 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) +
S (i-1,j) + S (i,j-1) )
</div>
"""
return GridUtil.smooth(S, "SM5S")
def sm9s(S):
""" Smooth a scalar grid using a 9-point smoother
<div class=jython>
SM9S ( S ) = .25 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) +
S (i-1,j) + S (i,j-1) )
+ .0625 * ( S (i+1,j+1) +
S (i+1,j-1) +
S (i-1,j+1) +
S (i-1,j-1) )
</div>
"""
return GridUtil.smooth(S, "SM9S")
def strd(V):
""" Stretching Deformation
<div class=jython>
STRD ( V ) = DDX ( u ) - DDY ( v )
</div>
"""
return sub(ddx(ur(V)),ddy(vr(V)))
def thta(temp):
""" Potential Temperature from Temperature (requires pressure domain)
"""
return DerivedGridFactory.createPotentialTemperature(temp)
def thte(temp,rh):
""" Equivalent Potential Temperature from Temperature and Relative
humidity (requires pressure domain)
"""
return DerivedGridFactory.createEquivalentPotentialTemperature(temp,rh)
def un(V):
""" North relative u component
"""
return ur(DerivedGridFactory.createTrueFlowVector(V))
def ur(V):
""" Grid relative u component
"""
return DerivedGridFactory.getUComponent(V)
def vn(V):
""" North relative v component
"""
return vr(DerivedGridFactory.createTrueFlowVector(V))
def vor(V):
""" Relative Vorticity
<div class=jython>
VOR ( V ) = DDX ( v ) - DDY ( u )
</div>
"""
return sub(ddx(vr(V)),ddy(ur(V)))
def vr(V):
""" Grid relative v component
"""
return DerivedGridFactory.getVComponent(V)
def xav(S):
""" Average along a grid row
<div class=jython>
XAV (S) = ( S (X1) + S (X2) + ... + S (KXD) ) / KNT
KXD = number of points in row
KNT = number of non-missing points in row
XAV for a row is stored at every point in that row.
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_AVERAGE, GridMath.AXIS_X)
def xsum(S):
""" Sum along a grid row
<div class=jython>
XSUM (S) = ( S (X1) + S (X2) + ... + S (KXD) )
KXD = number of points in row
XSUM for a row is stored at every point in that row.
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_SUM, GridMath.AXIS_X)
def yav(S):
""" Average along a grid column
<div class=jython>
YAV (S) = ( S (Y1) + S (Y2) + ... + S (KYD) ) / KNT
KYD = number of points in column
KNT = number of non-missing points in column
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_AVERAGE, GridMath.AXIS_Y)
def ysum(S):
""" Sum along a grid column
<div class=jython>
YSUM (S) = ( S (Y1) + S (Y2) + ... + S (KYD) )
KYD = number of points in row
YSUM for a column is stored at every point in that column.
</div>
"""
return GridMath.applyFunctionToAxis(S, GridMath.FUNC_SUM, GridMath.AXIS_Y)
def zav(S):
""" Average across the levels of a grid at all points
<div class=jython>
ZAV (S) = ( S (Z1) + S (Z2) + ... + S (KZD) ) / KNT
KZD = number of levels
KNT = number of non-missing points in column
</div>
"""
return GridMath.applyFunctionToLevels(S, GridMath.FUNC_AVERAGE)
def zsum(S):
""" Sum across the levels of a grid at all points
<div class=jython>
ZSUM (S) = ( S (Z1) + S (Z2) + ... + S (KZD) )
KZD = number of levels
ZSUM for a vertical column is stored at every point
</div>
"""
return GridMath.applyFunctionOverLevels(S, GridMath.FUNC_SUM)
def wshr(V, Z, top, bottom):
""" Magnitude of the vertical wind shear in a layer
<div class=jython>
WSHR ( V ) = MAG [ VLDF (V) ] / LDF (Z)
</div>
"""
dv = mag(vldf(V,top,bottom))
dz = ldf(Z,top,bottom)
return quo(dv,dz)
# Vector output
def age(obs,geo):
""" Ageostrophic wind
<div class=jython>
AGE ( S ) = [ u (OBS) - u (GEO(S)), v (OBS) - v (GEO(S)) ]
</div>
"""
return sub(obs,geo)
def circv(S, D=2):
"""
<div class=jython>
    Apply a circular aperture smoothing to the grid points. The weighting
    function is the circular aperture diffraction function. D is
the radius of influence in grid increments, increasing D increases
the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CIRC", int(D))
def cresv(S, D=2):
"""
<div class=jython>
Apply a Cressman smoothing to the grid points. The smoothed value
is given by a weighted average of surrounding grid points. D is
the radius of influence in grid increments,
increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "CRES", int(D))
def dvdx(V):
""" Partial x derivative of a vector
<div class=jython>
DVDX ( V ) = [ DDX (u), DDX (v) ]
</div>
"""
return vecr(ddx(ur(V)), ddx(vr(V)))
def dvdy(V):
""" Partial x derivative of a vector
<div class=jython>
DVDY ( V ) = [ DDY (u), DDY (v) ]
</div>
"""
return vecr(ddy(ur(V)), ddy(vr(V)))
def frnt(S,V):
""" Frontogenesis function from theta and the wind
<div class=jython>
FRNT ( THTA, V ) = 1/2 * MAG ( GRAD (THTA) ) *
( DEF * COS (2 * BETA) - DIV ) <p>
Where: BETA = ASIN ( (-DDX (THTA) * COS (PSI) <br>
- DDY (THTA) * SIN (PSI))/ <br>
MAG ( GRAD (THTA) ) ) <br>
PSI = 1/2 ATAN2 ( SHR / STR ) <br>
</div>
"""
shear = shr(V)
strch = strd(V)
psi = .5*atn2(shear,strch)
dxt = ddx(S)
dyt = ddy(S)
cosd = cos(psi)
sind = sin(psi)
gradt = grad(S)
mgradt = mag(gradt)
a = -cosd*dxt-sind*dyt
beta = asin(a/mgradt)
frnto = .5*mgradt*(defr(V)*cos(2*beta)-div(V))
return frnto
def geo(z):
""" geostrophic wind from height
<div class=jython>
GEO ( S ) = [ - DDY (S) * const / CORL, DDX (S) * const / CORL ]
</div>
"""
return DerivedGridFactory.createGeostrophicWindVector(z)
def grad(S):
""" Gradient of a scalar
<div class=jython>
GRAD ( S ) = [ DDX ( S ), DDY ( S ) ]
</div>
"""
return vecr(ddx(S),ddy(S))
def gwfv(V, N=6):
"""
<div class=jython>
Horizontal smoothing using normally distributed weights
with theoretical response of 1/e for N * delta-x wave.
Increasing N increases the smoothing. (default N=6)
</div>
"""
return gwfs(V, N)
def inad(V1,V2):
""" Inertial advective wind
<div class=jython>
INAD ( V1, V2 ) = [ DOT ( V1, GRAD (u2) ),
DOT ( V1, GRAD (v2) ) ]
</div>
"""
return vecr(dot(V1,grad(ur(V2))),dot(V1,grad(vr(V2))))
def qvec(S,V):
""" Q-vector at a level ( K / m / s )
<div class=jython>
QVEC ( S, V ) = [ - ( DOT ( DVDX (V), GRAD (S) ) ),
- ( DOT ( DVDY (V), GRAD (S) ) ) ]
    where S can be any thermal parameter, usually THTA.
</div>
"""
grads = grad(S)
qvecu = newName(-dot(dvdx(V),grads),"qvecu")
qvecv = newName(-dot(dvdy(V),grads),"qvecv")
return vecr(qvecu,qvecv)
def qvcl(THTA,V):
""" Q-vector ( K / m / s )
<div class=jython>
QVCL ( THTA, V ) = ( 1/( D (THTA) / DP ) ) *
[ ( DOT ( DVDX (V), GRAD (THTA) ) ),
( DOT ( DVDY (V), GRAD (THTA) ) ) ]
</div>
"""
dtdp = GridMath.partial(THTA,2)
gradt = grad(THTA)
qvecudp = newName(quo(dot(dvdx(V),gradt),dtdp),"qvecudp")
qvecvdp = newName(quo(dot(dvdy(V),gradt),dtdp),"qvecvdp")
return vecr(qvecudp,qvecvdp)
def rectv(S, D=2):
"""
<div class=jython>
    Apply a rectangular aperture smoothing to the grid points. The weighting
    function is the product of the rectangular aperture diffraction function
in the x and y directions. D is the radius of influence in grid
increments, increasing D increases the smoothing. (default D=2)
</div>
"""
return GridUtil.smooth(S, "RECT", int(D))
def sm5v(V):
""" Smooth a scalar grid using a 5-point smoother (see sm5s)
"""
return sm5s(V)
def sm9v(V):
""" Smooth a scalar grid using a 9-point smoother (see sm9s)
"""
return sm9s(V)
def thrm(S, level1, level2, unit=None):
""" Thermal wind
<div class=jython>
THRM ( S ) = [ u (GEO(S)) (level1) - u (GEO(S)) (level2),
v (GEO(S)) (level1) - v (GEO(S)) (level2) ]
</div>
"""
return vldf(geo(S),level1,level2, unit)
def vadd(V1,V2):
""" add the components of 2 vectors
<div class=jython>
VADD (V1, V2) = [ u1+u2, v1+v2 ]
</div>
"""
return add(V1,V2)
def vecn(S1,S2):
""" Make a true north vector from two components
<div class=jython>
VECN ( S1, S2 ) = [ S1, S2 ]
</div>
"""
return makeTrueVector(S1,S2)
def vecr(S1,S2):
""" Make a vector from two components
<div class=jython>
VECR ( S1, S2 ) = [ S1, S2 ]
</div>
"""
return makeVector(S1,S2)
def vlav(V,level1,level2, unit=None):
""" calculate the vector layer average
<div class=jython>
VLDF(V) = [(u(level1) - u(level2))/2,
(v(level1) - v(level2))/2]
</div>
"""
return layerAverage(V, level1, level2, unit)
def vldf(V,level1,level2, unit=None):
""" calculate the vector layer difference
<div class=jython>
VLDF(V) = [u(level1) - u(level2),
v(level1) - v(level2)]
</div>
"""
return layerDiff(V,level1,level2, unit)
def vmul(V1,V2):
""" Multiply the components of 2 vectors
<div class=jython>
VMUL (V1, V2) = [ u1*u2, v1*v2 ]
</div>
"""
return mul(V1,V2)
def vquo(V1,V2):
""" Divide the components of 2 vectors
<div class=jython>
VQUO (V1, V2) = [ u1/u2, v1/v2 ]
</div>
"""
return quo(V1,V2)
def vsub(V1,V2):
""" subtract the components of 2 vectors
<div class=jython>
VSUB (V1, V2) = [ u1-u2, v1-v2 ]
</div>
"""
return sub(V1,V2)
def LPIndex(u, v, z, t, top, bottom, unit):
""" calculate the wind shear between discrete layers
<div class=jython>
LP = 7.268DUDZ + 0.718DTDN + 0.318DUDN - 2.52
</div>
"""
Z = windShear(u, v, z, top, bottom, unit)*7.268
uwind = getSliceAtLevel(u, top)
vwind = getSliceAtLevel(v, top)
temp = newUnit(getSliceAtLevel(t, top), "temperature", "celsius")
HT = sqrt(ddx(temp)*ddx(temp) + ddy(temp)*ddy(temp))*0.718
HU = (ddx(vwind) + ddy(uwind))*0.318
L = add(noUnit(Z), add(noUnit(HU), noUnit(HT)))
L = (L - 2.520)*(-0.59)
P= 1.0/(1.0 + GridMath.applyFunctionOverGridsExt(L,"exp"))
LP = setLevel(P ,top, unit)
return LP
def EllrodIndex(u, v, z, top, bottom, unit):
""" calculate the wind shear between discrete layers
<div class=jython>
EI = VWS X ( DEF + DIV)
</div>
"""
VWS = windShear(u, v, z, top, bottom, unit)*100.0
#
uwind = getSliceAtLevel(u, top)
vwind = getSliceAtLevel(v, top)
DIV = (ddx(uwind) + ddy(vwind))* (-1.0)
#
DSH = ddx(vwind) + ddy(uwind)
DST = ddx(uwind) - ddy(vwind)
DEF = sqrt(DSH * DSH + DST * DST)
EI = mul(noUnit(VWS), add(noUnit(DEF), noUnit(DIV)))
return setLevel(EI, top, unit)
| [] |
DevilBit/Twitter-Bot | app.py | 6f1b285aeb5faf37906d575775a927e69a5321d6 | from selenium import webdriver #to get the browser
from selenium.webdriver.common.keys import Keys #to send key to browser
import getpass #to get password safely
import time #to pause the program
# a class to store all twitter-related objects and functions
class twitter_bot:
def __init__(self, username, password):
self.username = username
self.password = password
self.bot = webdriver.Firefox()
#login function
def login(self):
bot = self.bot
bot.get('https://twitter.com/login')
#sleep to wait for the browser to get the website
time.sleep(3)
email = bot.find_element_by_class_name('js-username-field') #get the email field
password = bot.find_element_by_class_name('js-password-field') #get the password field
#clear the email and password field just in case of autofill
email.clear()
password.clear()
#fill in email field
email.send_keys(self.username)
time.sleep(2)
#fill in password field
password.send_keys(self.password)
time.sleep(2)
#click the login button
bot.find_element_by_class_name("EdgeButtom--medium").click()
time.sleep(3)
def like_tweet(self, search):
bot = self.bot
#use keyword to search
bot.get('https://twitter.com/search?q=' + search + '&src=typd')
bot.implicitly_wait(3)
#get posts
for i in range(0, 30):
bot.execute_script('window.scrollTo(0, document.body.scrollHeight)')
time.sleep(10)
tweets = bot.find_elements_by_class_name('tweet')
links = [element.get_attribute('data-permalink-path') for element in tweets]
#like posts
for link in links:
bot.get('https://twitter.com/' + link)
try:
bot.find_element_by_class_name('HeartAnimation').click()
time.sleep(10)
except Exception as ex:
time.sleep(60)
if __name__ == '__main__':
username = input('Email: ')
password = getpass.getpass('Password: ')
search = input('Please enter keyword: ')
user = twitter_bot(username, password)
user.login()
time.sleep(10)
user.like_tweet(search)
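# Note: the fixed time.sleep() calls above are fragile; an explicit wait is
# more robust where supported, e.g. (illustrative):
#   from selenium.webdriver.support.ui import WebDriverWait
#   from selenium.webdriver.support import expected_conditions as EC
#   from selenium.webdriver.common.by import By
#   WebDriverWait(bot, 10).until(
#       EC.element_to_be_clickable((By.CLASS_NAME, 'HeartAnimation'))).click()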
| [((62, 15, 62, 44), 'getpass.getpass', 'getpass.getpass', ({(62, 31, 62, 43): '"""Password: """'}, {}), "('Password: ')", False, 'import getpass\n'), ((66, 4, 66, 18), 'time.sleep', 'time.sleep', ({(66, 15, 66, 17): '(10)'}, {}), '(10)', False, 'import time\n'), ((11, 19, 11, 38), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ({}, {}), '()', False, 'from selenium import webdriver\n'), ((18, 8, 18, 21), 'time.sleep', 'time.sleep', ({(18, 19, 18, 20): '(3)'}, {}), '(3)', False, 'import time\n'), ((28, 8, 28, 21), 'time.sleep', 'time.sleep', ({(28, 19, 28, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((32, 8, 32, 21), 'time.sleep', 'time.sleep', ({(32, 19, 32, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((36, 8, 36, 21), 'time.sleep', 'time.sleep', ({(36, 19, 36, 20): '(3)'}, {}), '(3)', False, 'import time\n'), ((47, 12, 47, 26), 'time.sleep', 'time.sleep', ({(47, 23, 47, 25): '(10)'}, {}), '(10)', False, 'import time\n'), ((55, 16, 55, 30), 'time.sleep', 'time.sleep', ({(55, 27, 55, 29): '(10)'}, {}), '(10)', False, 'import time\n'), ((57, 16, 57, 30), 'time.sleep', 'time.sleep', ({(57, 27, 57, 29): '(60)'}, {}), '(60)', False, 'import time\n')] |
shyhyawJou/GradCAM-pytorch | visualization.py | 8159f077552fc71055fe97c17bf8544d32cc8b0f | import torch
import torch.nn as nn
from torch.nn import functional as F
from PIL import Image
import cv2 as cv
from matplotlib import cm
import numpy as np
class GradCAM:
"""
#### Args:
        layer_name: module name (not child name); if None, the last layer
                    before average pooling is used (default: None)
"""
def __init__(self, model, device, layer_name=None, close_some_grad=True):
if layer_name is None:
layer_name = self.get_layer_name(model)
if layer_name is None:
raise ValueError(
"There is no global average pooling layer, plz specify 'layer_name'"
)
for n, m in model.named_children():
if close_some_grad:
m.requires_grad_(False)
for sub_n, sub_m in m.named_modules():
if '.'.join((n, sub_n)) == layer_name:
sub_m.register_forward_hook(self.forward_hook)
sub_m.register_full_backward_hook(self.backward_hook)
m.requires_grad_(True)
break
model = model.to(device)
self.model = model
self.device = device
self.feature_maps = {}
self.gradients = {}
def get_heatmap(self, img, img_tensor):
self.model.zero_grad()
img_tensor = img_tensor.to(self.device)
outputs = self.model(img_tensor)
_, pred_label = outputs.max(1)
# outputs shape = 1x2
outputs[0][pred_label].backward()
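        # Grad-CAM: weight each feature map by its spatially averaged
        # gradient, sum over channels, then ReLU (done under no_grad below).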
with torch.no_grad():
feature_maps = self.feature_maps["output"]
# "gradients" is a tuple with one item
grad_weights = self.gradients["output"][0]
h, w = grad_weights.size()[-2:]
grad_weights = grad_weights.sum((2,3), True) / (h * w)
cam = (grad_weights * feature_maps).sum(1)
F.relu(cam, True)
cam = cam / cam.max() * 255
cam = cam.to(dtype=torch.uint8, device="cpu")
cam = cam.numpy().transpose(1,2,0)
cam = cv.resize(cam, img.size[:2], interpolation=4)
cam = np.uint8(255 * cm.get_cmap("jet")(cam.squeeze()))
if not isinstance(img, np.ndarray):
img = np.asarray(img)
img_size = img.shape[:2][::-1] # w, h
overlay = np.uint8(0.6*img + 0.4 * cam[:,:,:3])
overlay = Image.fromarray(overlay)
if overlay.size != img_size:
overlay = overlay.resize(img_size, Image.BILINEAR)
return outputs.detach(), overlay
def get_layer_name(self, model):
        layer_name = None
        tmp = None  # name of the previously visited module
        for n, m in model.named_children():
            for sub_n, sub_m in m.named_modules():
                if isinstance(sub_m, (nn.AdaptiveAvgPool2d, nn.AvgPool2d)):
                    layer_name = tmp  # module visited just before the pooling layer
                tmp = '.'.join((n, sub_n))
return layer_name
def forward_hook(self, module, x, y):
#self.feature_maps["input"] = x
self.feature_maps["output"] = y
def backward_hook(self, module, x, y):
#self.gradients["input"] = x
self.gradients["output"] = y
self.gradients["output"] = y
| [((52, 13, 52, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((59, 12, 59, 29), 'torch.nn.functional.relu', 'F.relu', ({(59, 19, 59, 22): 'cam', (59, 24, 59, 28): '(True)'}, {}), '(cam, True)', True, 'from torch.nn import functional as F\n'), ((63, 18, 63, 63), 'cv2.resize', 'cv.resize', (), '', True, 'import cv2 as cv\n'), ((70, 22, 70, 59), 'numpy.uint8', 'np.uint8', ({(70, 31, 70, 58): '0.6 * img + 0.4 * cam[:, :, :3]'}, {}), '(0.6 * img + 0.4 * cam[:, :, :3])', True, 'import numpy as np\n'), ((71, 22, 71, 46), 'PIL.Image.fromarray', 'Image.fromarray', ({(71, 38, 71, 45): 'overlay'}, {}), '(overlay)', False, 'from PIL import Image\n'), ((67, 22, 67, 37), 'numpy.asarray', 'np.asarray', ({(67, 33, 67, 36): 'img'}, {}), '(img)', True, 'import numpy as np\n'), ((64, 33, 64, 51), 'matplotlib.cm.get_cmap', 'cm.get_cmap', ({(64, 45, 64, 50): '"""jet"""'}, {}), "('jet')", False, 'from matplotlib import cm\n')] |
Mechachleopteryx/CogAlg | frame_2D_alg/alternative versions/intra_blob_xy.py | 723104e1f57010e52f1dc249ba53ba58db0a991b | '''
2D version of 1st-level algorithm is a combination of frame_blobs, intra_blob, and comp_P: optional raster-to-vector conversion.
intra_blob recursively evaluates each blob for two forks of extended internal cross-comparison and sub-clustering:
der+: incremental derivation cross-comp in high-variation edge areas of +vg: positive deviation of gradient triggers comp_g,
rng+: incremental range cross-comp in low-variation flat areas of +v--vg: positive deviation of negated -vg triggers comp_r.
Each adds a layer of sub_blobs per blob.
Please see diagram: https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/intra_blob_2_fork_scheme.png
Blob structure, for all layers of blob hierarchy:
root_dert__,
Dert = I, iDy, iDx, G, Dy, Dx, M, S (area), Ly (vertical dimension)
# I: input, (iDy, iDx): angle of input gradient, G: gradient, (Dy, Dx): vertical and lateral Ds, M: match
sign,
box, # y0, yn, x0, xn
dert__, # box of derts, each = i, idy, idx, g, dy, dx, m
stack_[ stack_params, Py_ [(P_params, dert_)]]: refs down blob formation tree, in vertical (horizontal) order
# next fork:
fcr, # flag comp rng, also clustering criterion in dert and Dert: g in der+ fork, i+m in rng+ fork?
fig, # flag input is gradient
rdn, # redundancy to higher layers
rng, # comp range
sub_layers # [sub_blobs ]: list of layers across sub_blob derivation tree
# deeper layers are nested, multiple forks: no single set of fork params?
'''
from collections import deque, defaultdict
from class_cluster import ClusterStructure, NoneType
from class_bind import AdjBinder
from frame_blobs_yx import assign_adjacents
from intra_comp_g import comp_g, comp_r
from itertools import zip_longest
from class_stream import BlobStreamer
from utils import pairwise
import numpy as np
# from comp_P_draft import comp_P_blob
# filters, All *= rdn:
ave = 50 # fixed cost per dert, from average m, reflects blob definition cost, may be different for comp_a?
aveB = 50 # fixed cost per intra_blob comp and clustering
class CDeepP(ClusterStructure):
I = int
G = int
Dy = int
Dx = int
M = int
iDy = int
iDx = int
L = int
x0 = int
sign = NoneType
class CDeepStack(ClusterStructure):
I = int
G = int
Dy = int
Dx = int
M = int
iDy = int
iDx = int
S = int
Ly = int
y0 = int
Py_ = list
blob = object
down_connect_cnt = int
sign = NoneType
class CDeepBlob(ClusterStructure):
Dert = dict
box = list
stack_ = list
sign = NoneType
open_stacks = int
root_dert__ = object
dert__ = object
mask = object
adj_blobs = list
fopen = bool
margin = list
fcr = bool
fig = bool
rdn = float
rng = int
Ls = int # for visibility and next-fork rdn
sub_layers = list
# --------------------------------------------------------------------------------------------------------------
# functions, ALL WORK-IN-PROGRESS:
def intra_blob(blob, rdn, rng, fig, fcr, **kwargs): # recursive input rng+ | der+ cross-comp within blob
# fig: flag input is g | p, fcr: flag comp over rng+ | der+
if kwargs.get('render', None) is not None: # stop rendering sub-blobs when blob is too small
if blob.Dert['S'] < 100:
kwargs['render'] = False
spliced_layers = [] # to extend root_blob sub_layers
ext_dert__, ext_mask = extend_dert(blob)
if fcr:
dert__, mask = comp_r(ext_dert__, fig, fcr, ext_mask) # -> m sub_blobs
else:
dert__, mask = comp_g(ext_dert__, ext_mask) # -> g sub_blobs:
    if dert__[0].shape[0] > 2 and dert__[0].shape[1] > 2 and False in mask:  # min size in y and x, at least one dert in dert__
sub_blobs = cluster_derts(dert__, mask, ave * rdn, fcr, fig, **kwargs)
# fork params:
blob.fcr = fcr
blob.fig = fig
blob.rdn = rdn
blob.rng = rng
blob.Ls = len(sub_blobs) # for visibility and next-fork rdn
blob.sub_layers = [sub_blobs] # 1st layer of sub_blobs
for sub_blob in sub_blobs: # evaluate for intra_blob comp_g | comp_r:
G = blob.Dert['G']; adj_G = blob.adj_blobs[2]
borrow = min(abs(G), abs(adj_G) / 2) # or adjacent M if negative sign?
if sub_blob.sign:
if sub_blob.Dert['M'] - borrow > aveB * rdn: # M - (intra_comp value lend to edge blob)
# comp_r fork:
blob.sub_layers += intra_blob(sub_blob, rdn + 1 + 1 / blob.Ls, rng * 2, fig=fig, fcr=1, **kwargs)
# else: comp_P_
elif sub_blob.Dert['G'] + borrow > aveB * rdn: # G + (intra_comp value borrow from flat blob)
# comp_g fork:
blob.sub_layers += intra_blob(sub_blob, rdn + 1 + 1 / blob.Ls, rng=rng, fig=1, fcr=0, **kwargs)
# else: comp_P_
spliced_layers = [spliced_layers + sub_layers for spliced_layers, sub_layers in
zip_longest(spliced_layers, blob.sub_layers, fillvalue=[])]
return spliced_layers
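# Hedged invocation sketch (illustrative only, not from the original repo):
# after frame_blobs segments a frame, each strong blob would be deepened
# roughly like this, with `frame_blobs_root` as a hypothetical entry point:
#
#   frame = frame_blobs_root(image)
#   for blob in frame['blob__']:
#       if blob.Dert['G'] > aveB:
#           intra_blob(blob, rdn=1, rng=1, fig=False, fcr=False)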
def cluster_derts(dert__, mask, Ave, fcr, fig, render=False): # similar to frame_to_blobs
if fcr: # comp_r output; form clustering criterion:
if fig:
crit__ = dert__[0] + dert__[6] - Ave # eval by i + m, accum in rng; dert__[:,:,0] if not transposed
else:
crit__ = Ave - dert__[3] # eval by -g, accum in rng
else: # comp_g output
crit__ = dert__[6] - Ave # comp_g output eval by m, or clustering is always by m?
root_dert__ = dert__ # derts after the comps operation, which is the root_dert__
dert__ = [*zip(*dert__)] # transpose dert__ into shape [y, params, x]
sub_blobs = [] # from form_blob:
stack_ = deque() # buffer of running vertical stacks of Ps
stack_binder = AdjBinder(CDeepStack)
if render:
streamer = BlobStreamer(CDeepBlob, crit__, mask)
for y, dert_ in enumerate(dert__): # in height, first and last row are discarded; print(f'Processing intra line {y}...')
# if False in mask[i]: # [y,x,params], there is at least one dert in line
P_binder = AdjBinder(CDeepP) # binder needs data about clusters of the same level
P_ = form_P_(zip(*dert_), crit__[y], mask[y], P_binder) # horizontal clustering, adds a row of Ps
if render:
render = streamer.update_blob_conversion(y, P_) # if return False, stop rendering
P_ = scan_P_(P_, stack_, root_dert__, sub_blobs, P_binder) # vertical clustering, adds up_connects per P and down_connect_cnt per stack
stack_ = form_stack_(P_, root_dert__, sub_blobs, y)
stack_binder.bind_from_lower(P_binder)
while stack_: # frame ends, last-line stacks are merged into their blobs:
form_blob(stack_.popleft(), root_dert__, sub_blobs)
blob_binder = AdjBinder(CDeepBlob)
blob_binder.bind_from_lower(stack_binder)
assign_adjacents(blob_binder) # add adj_blobs to each blob
# sub_blobs = find_adjacent(sub_blobs)
if render: # rendering mode after blob conversion
streamer.end_blob_conversion(y)
return sub_blobs
# clustering functions:
# -------------------------------------------------------------------------------------------------------------------
def form_P_(dert_, crit_, mask_, binder): # segment dert__ into P__, in horizontal ) vertical order
P_ = deque() # row of Ps
sign_ = crit_ > 0
x0 = 0
try:
while mask_[x0]: # skip until not masked
next(dert_)
x0 += 1
except IndexError:
return P_ # the whole line is masked, return an empty P
I, iDy, iDx, G, Dy, Dx, M, L = *next(dert_), 1 # initialize P params
_sign = sign_[x0]
_mask = mask_[x0] # mask bit per dert
for x, (i, idy, idx, g, dy, dx, m) in enumerate(dert_, start=x0+1): # loop left to right in each row of derts
mask = mask_[x]
if ~mask: # current dert is not masked
sign = sign_[x]
if ~_mask and sign != _sign: # prior dert is not masked and sign changed
# pack P
                P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L, x0=x0, sign=_sign)
P_.append(P)
# initialize P params:
I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x
elif _mask:
I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x
# current dert is masked
elif ~_mask: # prior dert is not masked
# pack P
P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L, x0=x0, sign=_sign)
P_.append(P)
# initialize P params: (redundant)
# I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x + 1
if ~mask: # accumulate P params:
I += i
iDy += idy
iDx += idx
G += g
Dy += dy
Dx += dx
M += m
L += 1
_sign = sign # prior sign
_mask = mask
if ~_mask: # terminate and pack last P in a row if prior dert is unmasked
P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L, x0=x0, sign=_sign)
P_.append(P)
for _P, P in pairwise(P_):
if _P.x0 + _P.L == P.x0: # check if Ps are adjacents
binder.bind(_P, P)
return P_
def scan_P_(P_, stack_, root_dert__, sub_blobs, binder): # merge P into higher-row stack of Ps with same sign and x_coord overlap
next_P_ = deque() # to recycle P + up_connect_ that finished scanning _P, will be converted into next_stack_
if P_ and stack_: # if both input row and higher row have any Ps / _Ps left
P = P_.popleft() # load left-most (lowest-x) input-row P
stack = stack_.popleft() # higher-row stacks
_P = stack.Py_[-1] # last element of each stack is higher-row P
up_connect_ = [] # list of same-sign x-overlapping _Ps per P
while True: # while both P_ and stack_ are not empty
x0 = P.x0 # first x in P
xn = x0 + P.L # first x beyond P
_x0 = _P.x0 # first x in _P
_xn = _x0 + _P.L # first x beyond _P
if stack.G > 0: # check for overlaps in 8 directions, else a blob may leak through its external blob
if _x0 - 1 < xn and x0 < _xn + 1: # x overlap between loaded P and _P
if P.sign == stack.sign: # sign match
stack.down_connect_cnt += 1
up_connect_.append(stack) # buffer P-connected higher-row stacks into P' up_connect_
else:
binder.bind(_P, P)
else: # -G, check for orthogonal overlaps only: 4 directions, edge blobs are more selective
if _x0 < xn and x0 < _xn: # x overlap between loaded P and _P
if P.sign == stack.sign: # sign match
stack.down_connect_cnt += 1
up_connect_.append(stack) # buffer P-connected higher-row stacks into P' up_connect_
else:
binder.bind(_P, P)
if (xn < _xn or # _P overlaps next P in P_
                    xn == _xn and stack.sign):  # sign taken into account
next_P_.append((P, up_connect_)) # recycle _P for the next run of scan_P_
up_connect_ = []
if P_:
P = P_.popleft() # load next P
else: # terminate loop
if stack.down_connect_cnt != 1: # terminate stack, merge it into up_connects' blobs
form_blob(stack, root_dert__, sub_blobs)
break
else: # no next-P overlap
if stack.down_connect_cnt != 1: # terminate stack, merge it into up_connects' blobs
form_blob(stack, root_dert__, sub_blobs)
if stack_: # load stack with next _P
stack = stack_.popleft()
_P = stack.Py_[-1]
else: # no stack left: terminate loop
next_P_.append((P, up_connect_))
break
while P_: # terminate Ps and stacks that continue at row's end
next_P_.append((P_.popleft(), [])) # no up_connect
while stack_:
form_blob(stack_.popleft(), root_dert__, sub_blobs) # down_connect_cnt always == 0
return next_P_ # each element is P + up_connect_ refs
def form_stack_(P_, root_dert__, sub_blobs, y): # Convert or merge every P into its stack of Ps, merge blobs
next_stack_ = deque() # converted to stack_ in the next run of scan_P_
while P_:
P, up_connect_ = P_.popleft()
I, G, Dy, Dx, M, iDy, iDx, L, x0, s = P.unpack()
xn = x0 + L # next-P x0
if not up_connect_:
# initialize new stack for each input-row P that has no connections in higher row:
blob = CDeepBlob(Dert=dict(I=0, G=0, Dy=0, Dx=0, M=0, iDy=0, iDx=0, S=0, Ly=0),
box=[y, x0, xn], stack_=[], sign=s, open_stacks=1)
            new_stack = CDeepStack(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1,
y0=y, Py_=[P], blob=blob, down_connect_cnt=0, sign=s)
new_stack.hid = blob.id
blob.stack_.append(new_stack)
else:
if len(up_connect_) == 1 and up_connect_[0].down_connect_cnt == 1:
# P has one up_connect and that up_connect has one down_connect=P: merge P into up_connect stack:
new_stack = up_connect_[0]
new_stack.accumulate(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1)
new_stack.Py_.append(P) # Py_: vertical buffer of Ps
new_stack.down_connect_cnt = 0 # reset down_connect_cnt
blob = new_stack.blob
else: # if > 1 up_connects, or 1 up_connect that has > 1 down_connect_cnt:
blob = up_connect_[0].blob
# initialize new_stack with up_connect blob:
                new_stack = CDeepStack(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1,
y0=y, Py_=[P], blob=blob, down_connect_cnt=0, sign=s)
new_stack.hid = blob.id
blob.stack_.append(new_stack)
if len(up_connect_) > 1: # merge blobs of all up_connects
if up_connect_[0].down_connect_cnt == 1: # up_connect is not terminated
form_blob(up_connect_[0], root_dert__, sub_blobs) # merge stack of 1st up_connect into its blob
for up_connect in up_connect_[1:len(up_connect_)]: # merge blobs of other up_connects into blob of 1st up_connect
if up_connect.down_connect_cnt == 1:
form_blob(up_connect, root_dert__, sub_blobs)
if not up_connect.blob is blob:
merged_blob = up_connect.blob
I, G, Dy, Dx, M, iDy, iDx, S, Ly = merged_blob.Dert.values()
accum_Dert(blob.Dert, I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=S, Ly=Ly)
blob.open_stacks += merged_blob.open_stacks
blob.box[0] = min(blob.box[0], merged_blob.box[0]) # extend box y0
blob.box[1] = min(blob.box[1], merged_blob.box[1]) # extend box x0
blob.box[2] = max(blob.box[2], merged_blob.box[2]) # extend box xn
for stack in merged_blob.stack_:
if not stack is up_connect:
stack.blob = blob # blobs in other up_connects are references to blob in the first up_connect.
stack.hid = blob.id
blob.stack_.append(stack) # buffer of merged root stacks.
up_connect.blob = blob
up_connect.hid = blob.id
blob.stack_.append(up_connect)
blob.open_stacks -= 1 # overlap with merged blob.
blob.box[1] = min(blob.box[1], x0) # extend box x0
blob.box[2] = max(blob.box[2], xn) # extend box xn
P.hid = new_stack.id # assign higher cluster id for P
next_stack_.append(new_stack)
return next_stack_
def form_blob(stack, root_dert__, sub_blobs): # increment blob with terminated stack, check for blob termination
I, G, Dy, Dx, M, iDy, iDx, S, Ly, y0, Py_, blob, down_connect_cnt, sign = stack.unpack()
accum_Dert(blob.Dert, I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=S, Ly=Ly)
# terminated stack is merged into continued or initialized blob (all connected stacks):
blob.open_stacks += down_connect_cnt - 1 # incomplete stack cnt + terminated stack down_connect_cnt - 1: stack itself
# open stacks contain Ps of a current row and may be extended with new x-overlapping Ps in next run of scan_P_
if blob.open_stacks == 0: # if number of incomplete stacks == 0
# blob is terminated and packed in blob root:
last_stack = stack
y0, x0, xn = blob.box
yn = last_stack.y0 + last_stack.Ly
mask = np.ones((yn - y0, xn - x0), dtype=bool) # mask box, then unmask Ps:
for stack in blob.stack_:
for y, P in enumerate(stack.Py_, start=stack.y0 - y0):
x_start = P.x0 - x0
x_stop = x_start + P.L
mask[y, x_start:x_stop] = False
fopen = 0 # flag: blob on frame boundary
if x0 == 0 or xn == root_dert__[0].shape[1] or y0 == 0 or yn == root_dert__[0].shape[0]:
fopen = 1
blob.root_dert__ = root_dert__
blob.box = (y0, yn, x0, xn)
blob.dert__ = [derts[y0:yn, x0:xn] for derts in root_dert__]
blob.mask = mask
blob.adj_blobs = [[], 0, 0]
blob.fopen = fopen
sub_blobs.append(blob)
def extend_dert(blob): # extend dert borders (+1 dert to boundaries)
y0, yn, x0, xn = blob.box # extend dert box:
rY, rX = blob.root_dert__[0].shape # higher dert size
# determine pad size
y0e = max(0, y0 - 1)
yne = min(rY, yn + 1)
x0e = max(0, x0 - 1)
xne = min(rX, xn + 1) # e is for extended
# take ext_dert__ from part of root_dert__
ext_dert__ = [derts[y0e:yne, x0e:xne] if derts is not None else None
for derts in blob.root_dert__]
# pad mask: top, btm, left, right. 1 or 0 at boundaries
mask = np.pad(blob.mask, ((y0 - y0e, yne - yn), (x0 - x0e, xne - xn)),
mode='constant', constant_values=True)
return ext_dert__, mask
def accum_Dert(Dert: dict, **params) -> None:
Dert.update({param: Dert[param] + value for param, value in params.items()}) | [((148, 13, 148, 20), 'collections.deque', 'deque', ({}, {}), '()', False, 'from collections import deque, defaultdict\n'), ((149, 19, 149, 40), 'class_bind.AdjBinder', 'AdjBinder', ({(149, 29, 149, 39): 'CDeepStack'}, {}), '(CDeepStack)', False, 'from class_bind import AdjBinder\n'), ((168, 18, 168, 38), 'class_bind.AdjBinder', 'AdjBinder', ({(168, 28, 168, 37): 'CDeepBlob'}, {}), '(CDeepBlob)', False, 'from class_bind import AdjBinder\n'), ((170, 4, 170, 33), 'frame_blobs_yx.assign_adjacents', 'assign_adjacents', ({(170, 21, 170, 32): 'blob_binder'}, {}), '(blob_binder)', False, 'from frame_blobs_yx import assign_adjacents\n'), ((184, 9, 184, 16), 'collections.deque', 'deque', ({}, {}), '()', False, 'from collections import deque, defaultdict\n'), ((234, 17, 234, 29), 'utils.pairwise', 'pairwise', ({(234, 26, 234, 28): 'P_'}, {}), '(P_)', False, 'from utils import pairwise\n'), ((243, 14, 243, 21), 'collections.deque', 'deque', ({}, {}), '()', False, 'from collections import deque, defaultdict\n'), ((305, 18, 305, 25), 'collections.deque', 'deque', ({}, {}), '()', False, 'from collections import deque, defaultdict\n'), ((421, 11, 422, 56), 'numpy.pad', 'np.pad', (), '', True, 'import numpy as np\n'), ((100, 23, 100, 61), 'intra_comp_g.comp_r', 'comp_r', ({(100, 30, 100, 40): 'ext_dert__', (100, 42, 100, 45): 'fig', (100, 47, 100, 50): 'fcr', (100, 52, 100, 60): 'ext_mask'}, {}), '(ext_dert__, fig, fcr, ext_mask)', False, 'from intra_comp_g import comp_g, comp_r\n'), ((102, 23, 102, 51), 'intra_comp_g.comp_g', 'comp_g', ({(102, 30, 102, 40): 'ext_dert__', (102, 42, 102, 50): 'ext_mask'}, {}), '(ext_dert__, ext_mask)', False, 'from intra_comp_g import comp_g, comp_r\n'), ((151, 19, 151, 56), 'class_stream.BlobStreamer', 'BlobStreamer', ({(151, 32, 151, 41): 'CDeepBlob', (151, 43, 151, 49): 'crit__', (151, 51, 151, 55): 'mask'}, {}), '(CDeepBlob, crit__, mask)', False, 'from class_stream import BlobStreamer\n'), ((154, 19, 154, 56), 'class_stream.BlobStreamer', 'BlobStreamer', ({(154, 32, 154, 41): 'CDeepBlob', (154, 43, 154, 49): 'crit__', (154, 51, 154, 55): 'mask'}, {}), '(CDeepBlob, crit__, mask)', False, 'from class_stream import BlobStreamer\n'), ((157, 19, 157, 36), 'class_bind.AdjBinder', 'AdjBinder', ({(157, 29, 157, 35): 'CDeepP'}, {}), '(CDeepP)', False, 'from class_bind import AdjBinder\n'), ((384, 15, 384, 54), 'numpy.ones', 'np.ones', (), '', True, 'import numpy as np\n'), ((130, 26, 130, 84), 'itertools.zip_longest', 'zip_longest', (), '', False, 'from itertools import zip_longest\n')] |
felipeek/bullet3 | examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_terrain_randomizer.py | 6a59241074720e9df119f2f86bc01765917feb1e | """Generates a random terrain at Minitaur gym environment reset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
parentdir = os.path.dirname(os.path.dirname(parentdir))
os.sys.path.insert(0, parentdir)
import itertools
import math
import enum
import numpy as np
from pybullet_envs.minitaur.envs import env_randomizer_base
_GRID_LENGTH = 15
_GRID_WIDTH = 10
_MAX_SAMPLE_SIZE = 30
_MIN_BLOCK_DISTANCE = 0.7
_MAX_BLOCK_LENGTH = _MIN_BLOCK_DISTANCE
_MIN_BLOCK_LENGTH = _MAX_BLOCK_LENGTH / 2
_MAX_BLOCK_HEIGHT = 0.05
_MIN_BLOCK_HEIGHT = _MAX_BLOCK_HEIGHT / 2
class PoissonDisc2D(object):
"""Generates 2D points using Poisson disk sampling method.
Implements the algorithm described in:
http://www.cs.ubc.ca/~rbridson/docs/bridson-siggraph07-poissondisk.pdf
  Unlike the uniform sampling method, which tends to create small clusters of
  points, the Poisson disk method enforces a minimum distance between points
  and is more suitable for generating a spatial distribution of
  non-overlapping objects.
"""
def __init__(self, grid_length, grid_width, min_radius, max_sample_size):
"""Initializes the algorithm.
Args:
grid_length: The length of the bounding square in which points are
sampled.
grid_width: The width of the bounding square in which points are
sampled.
min_radius: The minimum distance between any pair of points.
max_sample_size: The maximum number of sample points around a active site.
See details in the algorithm description.
"""
self._cell_length = min_radius / math.sqrt(2)
self._grid_length = grid_length
self._grid_width = grid_width
self._grid_size_x = int(grid_length / self._cell_length) + 1
self._grid_size_y = int(grid_width / self._cell_length) + 1
self._min_radius = min_radius
self._max_sample_size = max_sample_size
    # Flatten the 2D grid into a 1D array. The grid is used for fast nearest
# point searching.
self._grid = [None] * self._grid_size_x * self._grid_size_y
# Generate the first sample point and set it as an active site.
first_sample = np.array(np.random.random_sample(2)) * [grid_length, grid_width]
self._active_list = [first_sample]
# Also store the sample point in the grid.
self._grid[self._point_to_index_1d(first_sample)] = first_sample
def _point_to_index_1d(self, point):
"""Computes the index of a point in the grid array.
Args:
point: A 2D point described by its coordinates (x, y).
Returns:
The index of the point within the self._grid array.
"""
return self._index_2d_to_1d(self._point_to_index_2d(point))
def _point_to_index_2d(self, point):
"""Computes the 2D index (aka cell ID) of a point in the grid.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
x_index: The x index of the cell the point belongs to.
y_index: The y index of the cell the point belongs to.
"""
x_index = int(point[0] / self._cell_length)
y_index = int(point[1] / self._cell_length)
return x_index, y_index
def _index_2d_to_1d(self, index2d):
"""Converts the 2D index to the 1D position in the grid array.
Args:
index2d: The 2D index of a point (aka the cell ID) in the grid.
Returns:
The 1D position of the cell within the self._grid array.
"""
return index2d[0] + index2d[1] * self._grid_size_x
def _is_in_grid(self, point):
"""Checks if the point is inside the grid boundary.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
Whether the point is inside the grid.
"""
return (0 <= point[0] < self._grid_length) and (0 <= point[1] < self._grid_width)
def _is_in_range(self, index2d):
"""Checks if the cell ID is within the grid.
Args:
index2d: The 2D index of a point (aka the cell ID) in the grid.
Returns:
Whether the cell (2D index) is inside the grid.
"""
return (0 <= index2d[0] < self._grid_size_x) and (0 <= index2d[1] < self._grid_size_y)
def _is_close_to_existing_points(self, point):
"""Checks if the point is close to any already sampled (and stored) points.
Args:
point: A 2D point (list) described by its coordinates (x, y).
Returns:
True iff the distance of the point to any existing points is smaller than
the min_radius
"""
px, py = self._point_to_index_2d(point)
# Now we can check nearby cells for existing points
    for neighbor_cell in itertools.product(range(px - 1, px + 2), range(py - 1, py + 2)):
if not self._is_in_range(neighbor_cell):
continue
maybe_a_point = self._grid[self._index_2d_to_1d(neighbor_cell)]
if maybe_a_point is not None and np.linalg.norm(maybe_a_point - point) < self._min_radius:
return True
return False
def sample(self):
"""Samples new points around some existing point.
    Removes the sampling base point and also stores the newly sampled points if
they are far enough from all existing points.
"""
active_point = self._active_list.pop()
    for _ in range(self._max_sample_size):
# Generate random points near the current active_point between the radius
random_radius = np.random.uniform(self._min_radius, 2 * self._min_radius)
random_angle = np.random.uniform(0, 2 * math.pi)
# The sampled 2D points near the active point
sample = random_radius * np.array([np.cos(random_angle),
np.sin(random_angle)]) + active_point
if not self._is_in_grid(sample):
continue
if self._is_close_to_existing_points(sample):
continue
self._active_list.append(sample)
self._grid[self._point_to_index_1d(sample)] = sample
def generate(self):
"""Generates the Poisson disc distribution of 2D points.
Although the while loop looks scary, the algorithm is in fact O(N), where N
is the number of cells within the grid. When we sample around a base point
(in some base cell), new points will not be pushed into the base cell
because of the minimum distance constraint. Once the current base point is
removed, all future searches cannot start from within the same base cell.
Returns:
      All sampled points. The points are inside the square [0, grid_length] x [0,
grid_width]
"""
while self._active_list:
self.sample()
all_sites = []
for p in self._grid:
if p is not None:
all_sites.append(p)
return all_sites
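# Hedged illustration (added; not in the original file): a minimal,
# self-contained demo of the sampler above, reusing the module constants.
# The function is additive and never called by library code.
def _poisson_disc_demo():
  sampler = PoissonDisc2D(_GRID_LENGTH, _GRID_WIDTH, _MIN_BLOCK_DISTANCE,
                          _MAX_SAMPLE_SIZE)
  sites = sampler.generate()
  print("sampled %d block centers" % len(sites))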
class TerrainType(enum.Enum):
"""The randomzied terrain types we can use in the gym env."""
RANDOM_BLOCKS = 1
TRIANGLE_MESH = 2
class MinitaurTerrainRandomizer(env_randomizer_base.EnvRandomizerBase):
"""Generates an uneven terrain in the gym env."""
def __init__(self,
terrain_type=TerrainType.TRIANGLE_MESH,
mesh_filename="robotics/reinforcement_learning/minitaur/envs/testdata/"
"triangle_mesh_terrain/terrain9735.obj",
mesh_scale=None):
"""Initializes the randomizer.
Args:
terrain_type: Whether to generate random blocks or load a triangle mesh.
mesh_filename: The mesh file to be used. The mesh will only be loaded if
terrain_type is set to TerrainType.TRIANGLE_MESH.
mesh_scale: the scaling factor for the triangles in the mesh file.
"""
self._terrain_type = terrain_type
self._mesh_filename = mesh_filename
self._mesh_scale = mesh_scale if mesh_scale else [1.0, 1.0, 0.3]
def randomize_env(self, env):
"""Generate a random terrain for the current env.
Args:
env: A minitaur gym environment.
"""
if self._terrain_type is TerrainType.TRIANGLE_MESH:
self._load_triangle_mesh(env)
if self._terrain_type is TerrainType.RANDOM_BLOCKS:
self._generate_convex_blocks(env)
def _load_triangle_mesh(self, env):
"""Represents the random terrain using a triangle mesh.
It is possible for Minitaur leg to stuck at the common edge of two triangle
pieces. To prevent this from happening, we recommend using hard contacts
(or high stiffness values) for Minitaur foot in sim.
Args:
env: A minitaur gym environment.
"""
env.pybullet_client.removeBody(env.ground_id)
terrain_collision_shape_id = env.pybullet_client.createCollisionShape(
shapeType=env.pybullet_client.GEOM_MESH,
fileName=self._mesh_filename,
flags=1,
meshScale=self._mesh_scale)
env.ground_id = env.pybullet_client.createMultiBody(
baseMass=0, baseCollisionShapeIndex=terrain_collision_shape_id, basePosition=[0, 0, 0])
def _generate_convex_blocks(self, env):
"""Adds random convex blocks to the flat ground.
    We use the Poisson disk algorithm to add some random blocks on the ground.
    The Poisson disk algorithm sets the minimum distance between two sampling
    points, thus avoiding the clustering effect of a uniform N-D distribution.
Args:
env: A minitaur gym environment.
"""
poisson_disc = PoissonDisc2D(_GRID_LENGTH, _GRID_WIDTH, _MIN_BLOCK_DISTANCE, _MAX_SAMPLE_SIZE)
block_centers = poisson_disc.generate()
for center in block_centers:
# We want the blocks to be in front of the robot.
shifted_center = np.array(center) - [2, _GRID_WIDTH / 2]
# Do not place blocks near the point [0, 0], where the robot will start.
if abs(shifted_center[0]) < 1.0 and abs(shifted_center[1]) < 1.0:
continue
half_length = np.random.uniform(_MIN_BLOCK_LENGTH, _MAX_BLOCK_LENGTH) / (2 * math.sqrt(2))
half_height = np.random.uniform(_MIN_BLOCK_HEIGHT, _MAX_BLOCK_HEIGHT) / 2
box_id = env.pybullet_client.createCollisionShape(
env.pybullet_client.GEOM_BOX, halfExtents=[half_length, half_length, half_height])
env.pybullet_client.createMultiBody(
baseMass=0,
baseCollisionShapeIndex=box_id,
basePosition=[shifted_center[0], shifted_center[1], half_height])
| [((11, 0, 11, 32), 'os.sys.path.insert', 'os.sys.path.insert', ({(11, 19, 11, 20): '(0)', (11, 22, 11, 31): 'parentdir'}, {}), '(0, parentdir)', False, 'import os, inspect\n'), ((9, 28, 9, 55), 'os.path.dirname', 'os.path.dirname', ({(9, 44, 9, 54): 'currentdir'}, {}), '(currentdir)', False, 'import os, inspect\n'), ((10, 28, 10, 54), 'os.path.dirname', 'os.path.dirname', ({(10, 44, 10, 53): 'parentdir'}, {}), '(parentdir)', False, 'import os, inspect\n'), ((8, 61, 8, 83), 'inspect.currentframe', 'inspect.currentframe', ({}, {}), '()', False, 'import os, inspect\n'), ((52, 37, 52, 49), 'math.sqrt', 'math.sqrt', ({(52, 47, 52, 48): '(2)'}, {}), '(2)', False, 'import math\n'), ((162, 22, 162, 79), 'numpy.random.uniform', 'np.random.uniform', ({(162, 40, 162, 56): 'self._min_radius', (162, 58, 162, 78): '2 * self._min_radius'}, {}), '(self._min_radius, 2 * self._min_radius)', True, 'import numpy as np\n'), ((163, 21, 163, 54), 'numpy.random.uniform', 'np.random.uniform', ({(163, 39, 163, 40): '0', (163, 42, 163, 53): '2 * math.pi'}, {}), '(0, 2 * math.pi)', True, 'import numpy as np\n'), ((65, 28, 65, 54), 'numpy.random.random_sample', 'np.random.random_sample', ({(65, 52, 65, 53): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((278, 23, 278, 39), 'numpy.array', 'np.array', ({(278, 32, 278, 38): 'center'}, {}), '(center)', True, 'import numpy as np\n'), ((283, 20, 283, 75), 'numpy.random.uniform', 'np.random.uniform', ({(283, 38, 283, 55): '_MIN_BLOCK_LENGTH', (283, 57, 283, 74): '_MAX_BLOCK_LENGTH'}, {}), '(_MIN_BLOCK_LENGTH, _MAX_BLOCK_LENGTH)', True, 'import numpy as np\n'), ((284, 20, 284, 75), 'numpy.random.uniform', 'np.random.uniform', ({(284, 38, 284, 55): '_MIN_BLOCK_HEIGHT', (284, 57, 284, 74): '_MAX_BLOCK_HEIGHT'}, {}), '(_MIN_BLOCK_HEIGHT, _MAX_BLOCK_HEIGHT)', True, 'import numpy as np\n'), ((148, 39, 148, 76), 'numpy.linalg.norm', 'np.linalg.norm', ({(148, 54, 148, 75): '(maybe_a_point - point)'}, {}), '(maybe_a_point - point)', True, 'import numpy as np\n'), ((283, 83, 283, 95), 'math.sqrt', 'math.sqrt', ({(283, 93, 283, 94): '(2)'}, {}), '(2)', False, 'import math\n'), ((166, 41, 166, 61), 'numpy.cos', 'np.cos', ({(166, 48, 166, 60): 'random_angle'}, {}), '(random_angle)', True, 'import numpy as np\n'), ((167, 41, 167, 61), 'numpy.sin', 'np.sin', ({(167, 48, 167, 60): 'random_angle'}, {}), '(random_angle)', True, 'import numpy as np\n')] |
furious-luke/polecat | polecat/db/sql/expression/values.py | 7be5110f76dc42b15c922c1bb7d49220e916246d | from functools import partial
from polecat.db.query import query as query_module
from psycopg2.sql import SQL, Placeholder
from .expression import Expression
class Values(Expression):
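    """Build a SQL ``VALUES`` clause from a dict or a ``query.Values`` object."""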
def __init__(self, values, relation=None):
self.values = values
self.relation = relation
self.keyword = 'VALUES'
def to_sql(self):
if isinstance(self.values, query_module.Values):
get_values_sql = partial(
self.get_values_sql_from_values, self.values
)
else:
get_values_sql = partial(
self.get_values_sql_from_dict, self.values
)
return self.get_values_sql(get_values_sql)
def get_values_sql(self, get_values_sql):
values_sql, values_args = get_values_sql()
joined_sql = SQL(', ').join(
SQL('({})').format(
SQL(', ').join(row_sql)
)
for row_sql in values_sql
)
return SQL('%s {}' % self.keyword).format(joined_sql), values_args
def get_values_sql_from_values(self, values):
column_values_sql = []
column_values = ()
for row in values.iter_rows():
row_values_sql = []
for column_name, column_value in row:
value_sql, value = self.value_to_sql(column_value, column_name)
row_values_sql.append(value_sql)
column_values += value
column_values_sql.append(row_values_sql)
return column_values_sql, column_values
def get_values_sql_from_dict(self, values_dict):
column_values_sql = []
column_values = ()
for column_name, column_value in values_dict.items():
value_sql, value = self.value_to_sql(column_value, column_name)
column_values_sql.append(value_sql)
column_values += value
return (column_values_sql,), column_values
def value_to_sql(self, value, column_name=None):
if isinstance(value, Expression):
sql, args = value.to_sql()
return SQL('{}').format(sql), args
else:
if self.relation and column_name:
column = self.relation.get_column(column_name)
value = column.to_db_value(value)
return Placeholder(), (value,)
def iter_column_names(self):
if isinstance(self.values, dict):
return self.values.keys()
else:
return self.values.iter_column_names()
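# Hedged usage sketch (not part of the original file): rendering a
# dict-backed VALUES clause. `as_string` would need a live psycopg2
# connection, so only the placeholder form is shown:
#
#   values = Values({'name': 'alice', 'age': 30})
#   sql, args = values.to_sql()
#   # sql composes to: VALUES (%s, %s)   with args == ('alice', 30)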
| [((17, 29, 19, 13), 'functools.partial', 'partial', ({(18, 16, 18, 47): 'self.get_values_sql_from_values', (18, 49, 18, 60): 'self.values'}, {}), '(self.get_values_sql_from_values, self.values)', False, 'from functools import partial\n'), ((21, 29, 23, 13), 'functools.partial', 'partial', ({(22, 16, 22, 45): 'self.get_values_sql_from_dict', (22, 47, 22, 58): 'self.values'}, {}), '(self.get_values_sql_from_dict, self.values)', False, 'from functools import partial\n'), ((28, 21, 28, 30), 'psycopg2.sql.SQL', 'SQL', ({(28, 25, 28, 29): '""", """'}, {}), "(', ')", False, 'from psycopg2.sql import SQL, Placeholder\n'), ((65, 19, 65, 32), 'psycopg2.sql.Placeholder', 'Placeholder', ({}, {}), '()', False, 'from psycopg2.sql import SQL, Placeholder\n'), ((34, 15, 34, 42), 'psycopg2.sql.SQL', 'SQL', ({(34, 19, 34, 41): "('%s {}' % self.keyword)"}, {}), "('%s {}' % self.keyword)", False, 'from psycopg2.sql import SQL, Placeholder\n'), ((29, 12, 29, 23), 'psycopg2.sql.SQL', 'SQL', ({(29, 16, 29, 22): '"""({})"""'}, {}), "('({})')", False, 'from psycopg2.sql import SQL, Placeholder\n'), ((60, 19, 60, 28), 'psycopg2.sql.SQL', 'SQL', ({(60, 23, 60, 27): '"""{}"""'}, {}), "('{}')", False, 'from psycopg2.sql import SQL, Placeholder\n'), ((30, 16, 30, 25), 'psycopg2.sql.SQL', 'SQL', ({(30, 20, 30, 24): '""", """'}, {}), "(', ')", False, 'from psycopg2.sql import SQL, Placeholder\n')] |
swilcox/2019adventofcode | python/day3p1.py | b67261aae74805ba8c2f4b72f09dd79277224ebb | # 2019 advent day 3
MOVES = {
'R': (lambda x: (x[0], x[1] + 1)),
'L': (lambda x: (x[0], x[1] - 1)),
'U': (lambda x: (x[0] + 1, x[1])),
'D': (lambda x: (x[0] - 1, x[1])),
}
def build_route(directions: list) -> list:
current_location = (0, 0)
route = []
for d in directions:
direction, amount = d[0], int(d[1:])
for _ in range(amount):
current_location = MOVES[direction](current_location)
route.append(current_location)
return route
def find_intersections(r1: list, r2: list) -> set:
return set(r1).intersection(set(r2))
def find_shortest_manhattan_distance(points: set) -> int:
return min((abs(p[0]) + abs(p[1])) for p in points)
#R1 = 'R75,D30,R83,U83,L12,D49,R71,U7,L72'
#R2 = 'U62,R66,U55,R34,D71,R55,D58,R83'
#R1 = 'R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51'
#R2 = 'U98,R91,D20,R16,D67,R40,U7,R15,U6,R7'
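# for reference, the expected closest-intersection distances for the two
# sample pairs above are 159 and 135, respectively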
def main():
#route1 = build_route(R1.split(','))
#route2 = build_route(R2.split(','))
with open('day3input.txt') as f:
line1, line2 = f.readlines()
route1 = build_route(line1.strip().split(','))
route2 = build_route(line2.strip().split(','))
print(find_shortest_manhattan_distance(find_intersections(route1, route2)))
if __name__ == "__main__":
main()
| [] |
JavDomGom/mist | examples/demo/python/catalog.py | 83ae9f67df61ff2387a7d424cff0f8591a6a645f | import asyncio
async def searchDomains(domain, q):
domains = []
proc = await asyncio.create_subprocess_shell(f"dnsrecon -d {domain} -t crt", stdout=asyncio.subprocess.PIPE)
line = True
while line:
line = (await proc.stdout.readline()).decode('utf-8')
fields = line.split()
if len(fields)>1 and fields[1]=="A":
if q:
await q.put(fields[2])
domains.append(fields[2])
return domains
async def findOpenPorts(ip, ports, q):
openPorts = []
proc = await asyncio.create_subprocess_shell(f"nmap -p {ports} --open {ip}",stdout=asyncio.subprocess.PIPE)
line = True
while line:
line = (await proc.stdout.readline()).decode('utf-8')
fields = line.split()
if len(fields)>1 and fields[1]=="open":
openPort = fields[0].split("/")
if q:
await q.put({"ip": ip, "port": openPort[0], "protocol": openPort[1]})
openPorts.append({"port": openPort[0], "protocol": openPort[1]})
return openPorts
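# Hedged usage sketch (not part of the original file): exercising both
# helpers without a result queue. Requires dnsrecon and nmap on PATH;
# the domain, IP and port list below are illustrative placeholders.
if __name__ == '__main__':
    async def _demo():
        domains = await searchDomains('example.com', None)
        print('domains:', domains)
        open_ports = await findOpenPorts('127.0.0.1', '22,80,443', None)
        print('open ports:', open_ports)

    asyncio.run(_demo())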
| [((5, 17, 5, 112), 'asyncio.create_subprocess_shell', 'asyncio.create_subprocess_shell', (), '', False, 'import asyncio\n'), ((18, 17, 18, 111), 'asyncio.create_subprocess_shell', 'asyncio.create_subprocess_shell', (), '', False, 'import asyncio\n')] |
CiscoISE/ciscoisesdk | tests/api/v3_1_0/test_security_groups_acls.py | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | # -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI security_groups_acls API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason='version does not match')
def is_valid_get_security_groups_acl_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_a50d1bd34d5f593aadf8eb02083c67b0_v3_1_0').validate(obj.response)
return True
def get_security_groups_acl_by_id(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl_by_id(api, validator):
try:
assert is_valid_get_security_groups_acl_by_id(
validator,
get_security_groups_acl_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_security_groups_acl_by_id_default(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl_by_id_default(api, validator):
try:
assert is_valid_get_security_groups_acl_by_id(
validator,
get_security_groups_acl_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_security_groups_acl_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_afc81cd1e25c50319f75606b97c23b3d_v3_1_0').validate(obj.response)
return True
def update_security_groups_acl_by_id(api):
endpoint_result = api.security_groups_acls.update_security_groups_acl_by_id(
aclcontent='string',
active_validation=False,
description='string',
generation_id='string',
id='string',
ip_version='string',
is_read_only=True,
modelled_content={},
name='string',
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_update_security_groups_acl_by_id(api, validator):
try:
assert is_valid_update_security_groups_acl_by_id(
validator,
update_security_groups_acl_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def update_security_groups_acl_by_id_default(api):
endpoint_result = api.security_groups_acls.update_security_groups_acl_by_id(
active_validation=False,
id='string',
aclcontent=None,
description=None,
generation_id=None,
ip_version=None,
is_read_only=None,
modelled_content=None,
name=None,
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_update_security_groups_acl_by_id_default(api, validator):
try:
assert is_valid_update_security_groups_acl_by_id(
validator,
update_security_groups_acl_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_security_groups_acl_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_b0a2bea8bfec52b68663ef3f7ac6d7a7_v3_1_0').validate(obj.response)
return True
def delete_security_groups_acl_by_id(api):
endpoint_result = api.security_groups_acls.delete_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_delete_security_groups_acl_by_id(api, validator):
try:
assert is_valid_delete_security_groups_acl_by_id(
validator,
delete_security_groups_acl_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def delete_security_groups_acl_by_id_default(api):
endpoint_result = api.security_groups_acls.delete_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_delete_security_groups_acl_by_id_default(api, validator):
try:
assert is_valid_delete_security_groups_acl_by_id(
validator,
delete_security_groups_acl_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_999b22d6ad9f595ab7e3eee5cf44de8a_v3_1_0').validate(obj.response)
return True
def get_security_groups_acl(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl(
filter='value1,value2',
filter_type='string',
page=0,
size=0,
sortasc='string',
sortdsc='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl(api, validator):
try:
assert is_valid_get_security_groups_acl(
validator,
get_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl(
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl_default(api, validator):
try:
assert is_valid_get_security_groups_acl(
validator,
get_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_create_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_9ab61f24bdaf508590f7686e1130913f_v3_1_0').validate(obj.response)
return True
def create_security_groups_acl(api):
endpoint_result = api.security_groups_acls.create_security_groups_acl(
aclcontent='string',
active_validation=False,
description='string',
generation_id='string',
ip_version='string',
is_read_only=True,
modelled_content={},
name='string',
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_create_security_groups_acl(api, validator):
try:
assert is_valid_create_security_groups_acl(
validator,
create_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def create_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.create_security_groups_acl(
active_validation=False,
aclcontent=None,
description=None,
generation_id=None,
ip_version=None,
is_read_only=None,
modelled_content=None,
name=None,
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_create_security_groups_acl_default(api, validator):
try:
assert is_valid_create_security_groups_acl(
validator,
create_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_version(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_6704e67a1131578aa794d8377da9a1de_v3_1_0').validate(obj.response)
return True
def get_version(api):
endpoint_result = api.security_groups_acls.get_version(
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_version(api, validator):
try:
assert is_valid_get_version(
validator,
get_version(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_version_default(api):
endpoint_result = api.security_groups_acls.get_version(
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_version_default(api, validator):
try:
assert is_valid_get_version(
validator,
get_version_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_bulk_request_for_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_7da250e23ac05e6a8dcf32a81effcee9_v3_1_0').validate(obj.response)
return True
def bulk_request_for_security_groups_acl(api):
endpoint_result = api.security_groups_acls.bulk_request_for_security_groups_acl(
active_validation=False,
operation_type='string',
payload=None,
resource_media_type='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_bulk_request_for_security_groups_acl(api, validator):
try:
assert is_valid_bulk_request_for_security_groups_acl(
validator,
bulk_request_for_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def bulk_request_for_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.bulk_request_for_security_groups_acl(
active_validation=False,
operation_type=None,
payload=None,
resource_media_type=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_bulk_request_for_security_groups_acl_default(api, validator):
try:
assert is_valid_bulk_request_for_security_groups_acl(
validator,
bulk_request_for_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_monitor_bulk_status_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_07af5ee576605a5a915d888924c1e804_v3_1_0').validate(obj.response)
return True
def monitor_bulk_status_security_groups_acl(api):
endpoint_result = api.security_groups_acls.monitor_bulk_status_security_groups_acl(
bulkid='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_monitor_bulk_status_security_groups_acl(api, validator):
try:
assert is_valid_monitor_bulk_status_security_groups_acl(
validator,
monitor_bulk_status_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def monitor_bulk_status_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.monitor_bulk_status_security_groups_acl(
bulkid='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_monitor_bulk_status_security_groups_acl_default(api, validator):
try:
assert is_valid_monitor_bulk_status_security_groups_acl(
validator,
monitor_bulk_status_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
| [((30, 13, 30, 109), 'pytest.mark.skipif', 'pytest.mark.skipif', (), '', False, 'import pytest\n'), ((59, 13, 59, 67), 'pytest.raises', 'pytest.raises', ({(59, 27, 59, 66): '(JsonSchemaException, MalformedRequest)'}, {}), '((JsonSchemaException, MalformedRequest))', False, 'import pytest\n'), ((79, 13, 79, 78), 'pytest.raises', 'pytest.raises', ({(79, 27, 79, 77): '(JsonSchemaException, MalformedRequest, TypeError)'}, {}), '((JsonSchemaException, MalformedRequest, TypeError))', False, 'import pytest\n'), ((118, 13, 118, 67), 'pytest.raises', 'pytest.raises', ({(118, 27, 118, 66): '(JsonSchemaException, MalformedRequest)'}, {}), '((JsonSchemaException, MalformedRequest))', False, 'import pytest\n'), ((147, 13, 147, 78), 'pytest.raises', 'pytest.raises', ({(147, 27, 147, 77): '(JsonSchemaException, MalformedRequest, TypeError)'}, {}), '((JsonSchemaException, MalformedRequest, TypeError))', False, 'import pytest\n'), ((177, 13, 177, 67), 'pytest.raises', 'pytest.raises', ({(177, 27, 177, 66): '(JsonSchemaException, MalformedRequest)'}, {}), '((JsonSchemaException, MalformedRequest))', False, 'import pytest\n'), ((197, 13, 197, 78), 'pytest.raises', 'pytest.raises', ({(197, 27, 197, 77): '(JsonSchemaException, MalformedRequest, TypeError)'}, {}), '((JsonSchemaException, MalformedRequest, TypeError))', False, 'import pytest\n'), ((232, 13, 232, 67), 'pytest.raises', 'pytest.raises', ({(232, 27, 232, 66): '(JsonSchemaException, MalformedRequest)'}, {}), '((JsonSchemaException, MalformedRequest))', False, 'import pytest\n'), ((257, 13, 257, 78), 'pytest.raises', 'pytest.raises', ({(257, 27, 257, 77): '(JsonSchemaException, MalformedRequest, TypeError)'}, {}), '((JsonSchemaException, MalformedRequest, TypeError))', False, 'import pytest\n'), ((295, 13, 295, 67), 'pytest.raises', 'pytest.raises', ({(295, 27, 295, 66): '(JsonSchemaException, MalformedRequest)'}, {}), '((JsonSchemaException, MalformedRequest))', False, 'import pytest\n'), ((323, 13, 323, 78), 'pytest.raises', 'pytest.raises', ({(323, 27, 323, 77): '(JsonSchemaException, MalformedRequest, TypeError)'}, {}), '((JsonSchemaException, MalformedRequest, TypeError))', False, 'import pytest\n'), ((353, 13, 353, 67), 'pytest.raises', 'pytest.raises', ({(353, 27, 353, 66): '(JsonSchemaException, MalformedRequest)'}, {}), '((JsonSchemaException, MalformedRequest))', False, 'import pytest\n'), ((373, 13, 373, 78), 'pytest.raises', 'pytest.raises', ({(373, 27, 373, 77): '(JsonSchemaException, MalformedRequest, TypeError)'}, {}), '((JsonSchemaException, MalformedRequest, TypeError))', False, 'import pytest\n'), ((406, 13, 406, 67), 'pytest.raises', 'pytest.raises', ({(406, 27, 406, 66): '(JsonSchemaException, MalformedRequest)'}, {}), '((JsonSchemaException, MalformedRequest))', False, 'import pytest\n'), ((429, 13, 429, 78), 'pytest.raises', 'pytest.raises', ({(429, 27, 429, 77): '(JsonSchemaException, MalformedRequest, TypeError)'}, {}), '((JsonSchemaException, MalformedRequest, TypeError))', False, 'import pytest\n'), ((459, 13, 459, 67), 'pytest.raises', 'pytest.raises', ({(459, 27, 459, 66): '(JsonSchemaException, MalformedRequest)'}, {}), '((JsonSchemaException, MalformedRequest))', False, 'import pytest\n'), ((479, 13, 479, 78), 'pytest.raises', 'pytest.raises', ({(479, 27, 479, 77): '(JsonSchemaException, MalformedRequest, TypeError)'}, {}), '((JsonSchemaException, MalformedRequest, TypeError))', False, 'import pytest\n')] |
stefanheyder/geomstats | geomstats/geometry/riemannian_metric.py | c4e6d959db7b1bcc99b00b535b8aa5d832b62e28 | """Riemannian and pseudo-Riemannian metrics."""
import math
import warnings
import autograd
import geomstats.backend as gs
from geomstats.geometry.connection import Connection
EPSILON = 1e-4
N_CENTERS = 10
TOLERANCE = 1e-5
N_REPETITIONS = 20
N_MAX_ITERATIONS = 50000
N_STEPS = 10
def loss(y_pred, y_true, metric):
"""Compute loss function between prediction and ground truth.
Loss function given by a Riemannian metric,
expressed as the squared geodesic distance between the prediction
and the ground truth.
Parameters
----------
    y_pred : array-like, shape=[n_samples, dimension]
        Predicted point.
    y_true : array-like, shape=[n_samples, dimension]
        Ground-truth point.
    metric : RiemannianMetric
        Metric used to compute the squared geodesic distance.

    Returns
    -------
    loss : array-like, shape=[n_samples,]
        Squared geodesic distance between prediction and ground truth.
"""
loss = metric.squared_dist(y_pred, y_true)
return loss
def grad(y_pred, y_true, metric):
"""Closed-form for the gradient of the loss function."""
tangent_vec = metric.log(base_point=y_pred, point=y_true)
grad_vec = - 2. * tangent_vec
inner_prod_mat = metric.inner_product_matrix(base_point=y_pred)
grad = gs.einsum('ni,nij->ni',
grad_vec,
gs.transpose(inner_prod_mat, axes=(0, 2, 1)))
return grad
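

# Illustrative sketch (not part of the original module): how `loss` and `grad`
# can drive one step of Riemannian gradient descent on a batch of predictions.
# The `metric` object, the learning rate and the assumption that `metric.exp`
# accepts these shapes are placeholders chosen for demonstration.
def example_gradient_step(y_pred, y_true, metric, learning_rate=0.1):
    """Return updated predictions after one gradient-descent step (sketch)."""
    riemannian_grad = grad(y_pred, y_true, metric)
    # Step along the geodesic in the direction that decreases the loss.
    return metric.exp(tangent_vec=-learning_rate * riemannian_grad,
                      base_point=y_pred)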
class RiemannianMetric(Connection):
"""Class for Riemannian and pseudo-Riemannian metrics."""
def __init__(self, dimension, signature=None):
assert isinstance(dimension, int) or dimension == math.inf
assert dimension > 0
super().__init__(dimension=dimension)
self.signature = signature
def inner_product_matrix(self, base_point=None):
"""Inner product matrix at the tangent space at a base point.
Parameters
----------
base_point : array-like, shape=[n_samples, dimension], optional
"""
raise NotImplementedError(
'The computation of the inner product matrix'
' is not implemented.')
def inner_product_inverse_matrix(self, base_point=None):
"""Inner product matrix at the tangent space at a base point.
Parameters
----------
base_point : array-like, shape=[n_samples, dimension], optional
"""
metric_matrix = self.inner_product_matrix(base_point)
cometric_matrix = gs.linalg.inv(metric_matrix)
return cometric_matrix
def inner_product_derivative_matrix(self, base_point=None):
"""Compute derivative of the inner prod matrix at base point.
Parameters
----------
base_point : array-like, shape=[n_samples, dimension], optional
"""
metric_derivative = autograd.jacobian(self.inner_product_matrix)
return metric_derivative(base_point)
def christoffels(self, base_point):
"""Compute Christoffel symbols associated with the connection.
Parameters
----------
base_point: array-like, shape=[n_samples, dimension]
Returns
-------
christoffels: array-like,
shape=[n_samples, dimension, dimension, dimension]
"""
cometric_mat_at_point = self.inner_product_inverse_matrix(base_point)
metric_derivative_at_point = self.inner_product_derivative_matrix(
base_point)
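        # Koszul formula in coordinates, assembled below term by term:
        # Gamma^i_{kl} = 1/2 g^{im} (d_l g_{mk} + d_k g_{ml} - d_m g_{kl}),
        # with g^{im} the cometric and d the coordinate derivative.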
term_1 = gs.einsum('nim,nmkl->nikl',
cometric_mat_at_point,
metric_derivative_at_point)
term_2 = gs.einsum('nim,nmlk->nilk',
cometric_mat_at_point,
metric_derivative_at_point)
term_3 = - gs.einsum('nim,nklm->nikl',
cometric_mat_at_point,
metric_derivative_at_point)
christoffels = 0.5 * (term_1 + term_2 + term_3)
return christoffels
def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):
"""Inner product between two tangent vectors at a base point.
Parameters
----------
tangent_vec_a: array-like, shape=[n_samples, dimension]
or shape=[1, dimension]
tangent_vec_b: array-like, shape=[n_samples, dimension]
or shape=[1, dimension]
base_point: array-like, shape=[n_samples, dimension]
or shape=[1, dimension]
Returns
-------
inner_product : array-like, shape=[n_samples,]
"""
tangent_vec_a = gs.to_ndarray(tangent_vec_a, to_ndim=2)
tangent_vec_b = gs.to_ndarray(tangent_vec_b, to_ndim=2)
n_tangent_vec_a = gs.shape(tangent_vec_a)[0]
n_tangent_vec_b = gs.shape(tangent_vec_b)[0]
inner_prod_mat = self.inner_product_matrix(base_point)
inner_prod_mat = gs.to_ndarray(inner_prod_mat, to_ndim=3)
n_mats = gs.shape(inner_prod_mat)[0]
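        # Align batch dimensions: a single tangent vector or a single metric
        # matrix is broadcast against the other operand before the einsum
        # contraction; mismatched batch sizes are rejected.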
if n_tangent_vec_a != n_mats:
if n_tangent_vec_a == 1:
tangent_vec_a = gs.squeeze(tangent_vec_a, axis=0)
einsum_str_a = 'j,njk->nk'
elif n_mats == 1:
inner_prod_mat = gs.squeeze(inner_prod_mat, axis=0)
einsum_str_a = 'nj,jk->nk'
else:
raise ValueError('Shape mismatch for einsum.')
else:
einsum_str_a = 'nj,njk->nk'
aux = gs.einsum(einsum_str_a, tangent_vec_a, inner_prod_mat)
n_auxs, _ = gs.shape(aux)
if n_tangent_vec_b != n_auxs:
if n_auxs == 1:
aux = gs.squeeze(aux, axis=0)
einsum_str_b = 'k,nk->n'
elif n_tangent_vec_b == 1:
tangent_vec_b = gs.squeeze(tangent_vec_b, axis=0)
einsum_str_b = 'nk,k->n'
else:
raise ValueError('Shape mismatch for einsum.')
else:
einsum_str_b = 'nk,nk->n'
inner_prod = gs.einsum(einsum_str_b, aux, tangent_vec_b)
inner_prod = gs.to_ndarray(inner_prod, to_ndim=2, axis=1)
assert gs.ndim(inner_prod) == 2, inner_prod.shape
return inner_prod
def squared_norm(self, vector, base_point=None):
"""Compute the square of the norm of a vector.
Squared norm of a vector associated to the inner product
at the tangent space at a base point.
Parameters
----------
vector : array-like, shape=[n_samples, dimension]
base_point : array-like, shape=[n_samples, dimension]
Returns
-------
sq_norm : array-like, shape=[n_samples,]
"""
sq_norm = self.inner_product(vector, vector, base_point)
return sq_norm
def norm(self, vector, base_point=None):
"""Compute norm of a vector.
Norm of a vector associated to the inner product
at the tangent space at a base point.
Note: This only works for positive-definite
Riemannian metrics and inner products.
Parameters
----------
vector : array-like, shape=[n_samples, dimension]
base_point : array-like, shape=[n_samples, dimension]
Returns
-------
norm : array-like, shape=[n_samples,]
"""
sq_norm = self.squared_norm(vector, base_point)
norm = gs.sqrt(sq_norm)
return norm
def geodesic(self, initial_point,
end_point=None, initial_tangent_vec=None,
point_type='vector'):
"""Return the geodesic as function of t.
Geodesic curve defined by either:
- an initial point and an initial tangent vector, or
- an initial point and an end point.
The geodesic is returned as a function parameterized by t.
Parameters
----------
initial_point : array-like, shape=[n_samples, dimension]
end_point : array-like, shape=[n_samples, dimension], optional
initial_tangent_vec : array-like, shape=[n_samples, dimension],
optional
point_type : str, optional
Returns
-------
path : callable
"""
point_ndim = 1
if point_type == 'matrix':
point_ndim = 2
initial_point = gs.to_ndarray(initial_point,
to_ndim=point_ndim + 1)
if end_point is None and initial_tangent_vec is None:
raise ValueError('Specify an end point or an initial tangent '
'vector to define the geodesic.')
if end_point is not None:
end_point = gs.to_ndarray(end_point,
to_ndim=point_ndim + 1)
shooting_tangent_vec = self.log(point=end_point,
base_point=initial_point)
if initial_tangent_vec is not None:
assert gs.allclose(shooting_tangent_vec, initial_tangent_vec)
initial_tangent_vec = shooting_tangent_vec
initial_tangent_vec = gs.array(initial_tangent_vec)
initial_tangent_vec = gs.to_ndarray(initial_tangent_vec,
to_ndim=point_ndim + 1)
def path(t):
"""Generate a function parameterizing the geodesic.
Parameters
----------
t : parameter value of the geodesic
Returns
-------
point_at_time_t : callable
"""
t = gs.cast(t, gs.float32)
t = gs.to_ndarray(t, to_ndim=1)
t = gs.to_ndarray(t, to_ndim=2, axis=1)
new_initial_point = gs.to_ndarray(
initial_point,
to_ndim=point_ndim + 1)
new_initial_tangent_vec = gs.to_ndarray(
initial_tangent_vec,
to_ndim=point_ndim + 1)
if point_type == 'vector':
tangent_vecs = gs.einsum('il,nk->ik',
t,
new_initial_tangent_vec)
elif point_type == 'matrix':
tangent_vecs = gs.einsum('il,nkm->ikm',
t,
new_initial_tangent_vec)
point_at_time_t = self.exp(tangent_vec=tangent_vecs,
base_point=new_initial_point)
return point_at_time_t
return path
def squared_dist(self, point_a, point_b):
"""Squared geodesic distance between two points.
Parameters
----------
point_a : array-like, shape=[n_samples, dimension]
point_b : array-like, shape=[n_samples, dimension]
Returns
-------
sq_dist : array-like, shape=[n_samples,]
"""
log = self.log(point=point_b, base_point=point_a)
sq_dist = self.squared_norm(vector=log, base_point=point_a)
return sq_dist
def dist(self, point_a, point_b):
"""Geodesic distance between two points.
Note: It only works for positive definite
Riemannian metrics.
Parameters
----------
point_a : array-like, shape=[n_samples, dimension]
point_b : array-like, shape=[n_samples, dimension]
Returns
-------
dist : array-like, shape=[n_samples,]
"""
sq_dist = self.squared_dist(point_a, point_b)
dist = gs.sqrt(sq_dist)
return dist
def variance(self,
points,
weights=None,
base_point=None,
point_type='vector'):
"""Variance of (weighted) points wrt a base point.
Parameters
----------
points: array-like, shape=[n_samples, dimension]
        weights: array-like, shape=[n_samples, 1], optional

        Returns
        -------
        variance : array-like, shape=[1, 1]
            Weighted variance of the points with respect to the base point.
        """
if point_type == 'vector':
points = gs.to_ndarray(points, to_ndim=2)
if point_type == 'matrix':
points = gs.to_ndarray(points, to_ndim=3)
n_points = gs.shape(points)[0]
if weights is None:
weights = gs.ones((n_points, 1))
weights = gs.array(weights)
weights = gs.to_ndarray(weights, to_ndim=2, axis=1)
sum_weights = gs.sum(weights)
if base_point is None:
base_point = self.mean(points, weights)
variance = 0.
sq_dists = self.squared_dist(base_point, points)
variance += gs.einsum('nk,nj->j', weights, sq_dists)
variance = gs.array(variance)
variance /= sum_weights
variance = gs.to_ndarray(variance, to_ndim=1)
variance = gs.to_ndarray(variance, to_ndim=2, axis=1)
return variance
def mean(self, points,
weights=None,
n_max_iterations=32,
epsilon=EPSILON,
point_type='vector',
mean_method='default',
verbose=False):
"""Frechet mean of (weighted) points.
Parameters
----------
points : array-like, shape=[n_samples, dimension]
weights : array-like, shape=[n_samples, 1], optional
verbose : bool, optional
Returns
-------
mean : array-like
the Frechet mean of points, a point on the manifold
"""
if mean_method == 'default':
# TODO(nina): Profile this code to study performance,
# i.e. what to do with sq_dists_between_iterates.
def while_loop_cond(iteration, mean, variance, sq_dist):
result = ~gs.logical_or(
gs.isclose(variance, 0.),
gs.less_equal(sq_dist, epsilon * variance))
return result[0, 0] or iteration == 0
def while_loop_body(iteration, mean, variance, sq_dist):
logs = self.log(point=points, base_point=mean)
tangent_mean = gs.einsum('nk,nj->j', weights, logs)
tangent_mean /= sum_weights
mean_next = self.exp(
tangent_vec=tangent_mean,
base_point=mean)
sq_dist = self.squared_dist(mean_next, mean)
sq_dists_between_iterates.append(sq_dist)
variance = self.variance(points=points,
weights=weights,
base_point=mean_next)
mean = mean_next
iteration += 1
return [iteration, mean, variance, sq_dist]
if point_type == 'vector':
points = gs.to_ndarray(points, to_ndim=2)
if point_type == 'matrix':
points = gs.to_ndarray(points, to_ndim=3)
n_points = gs.shape(points)[0]
if weights is None:
weights = gs.ones((n_points, 1))
weights = gs.array(weights)
weights = gs.to_ndarray(weights, to_ndim=2, axis=1)
sum_weights = gs.sum(weights)
mean = points[0]
if point_type == 'vector':
mean = gs.to_ndarray(mean, to_ndim=2)
if point_type == 'matrix':
mean = gs.to_ndarray(mean, to_ndim=3)
if n_points == 1:
return mean
sq_dists_between_iterates = []
iteration = 0
sq_dist = gs.array([[0.]])
variance = gs.array([[0.]])
last_iteration, mean, variance, sq_dist = gs.while_loop(
lambda i, m, v, sq: while_loop_cond(i, m, v, sq),
lambda i, m, v, sq: while_loop_body(i, m, v, sq),
loop_vars=[iteration, mean, variance, sq_dist],
maximum_iterations=n_max_iterations)
if last_iteration == n_max_iterations:
                print('Maximum number of iterations {} reached. '
                      'The mean may be inaccurate'.format(n_max_iterations))
if verbose:
print('n_iter: {}, final variance: {}, final dist: {}'.format(
last_iteration, variance, sq_dist))
mean = gs.to_ndarray(mean, to_ndim=2)
return mean
if mean_method == 'frechet-poincare-ball':
lr = 1e-3
tau = 5e-3
if len(points) == 1:
return points
iteration = 0
convergence = math.inf
barycenter = points.mean(0, keepdims=True) * 0
while convergence > tau and n_max_iterations > iteration:
iteration += 1
expand_barycenter = gs.repeat(barycenter, points.shape[0], 0)
grad_tangent = 2 * self.log(points, expand_barycenter)
cc_barycenter = self.exp(lr * grad_tangent.sum(0,
keepdims=True),
barycenter)
convergence = self.dist(cc_barycenter, barycenter).max().item()
barycenter = cc_barycenter
if iteration == n_max_iterations:
warnings.warn(
'Maximum number of iterations {} reached. The '
'mean may be inaccurate'.format(n_max_iterations))
return barycenter
def adaptive_gradientdescent_mean(self, points,
weights=None,
n_max_iterations=40,
epsilon=1e-12,
init_points=[],
verbose=False):
"""Compute Frechet mean of (weighted) points using adaptive time-steps.
Frechet mean of (weighted) points using adaptive time-steps
The loss function optimized is ||M_1(x)||_x (where M_1(x) is
the tangent mean at x) rather than the mean-square-distance (MSD)
because this saves computation time.
Parameters
----------
points: array-like, shape=[n_samples, dimension]
weights: array-like, shape=[n_samples, 1], optional
init_points: array-like, shape=[n_init, dimension]
epsilon: tolerance for stopping the gradient descent
verbose: verbose mode printing the surrogate value
epsilon: tolerance for stopping the gradient descent
"""
# TODO(Xavier): This function assumes that all points are lists
# of vectors and not of matrices
n_points = gs.shape(points)[0]
if n_points == 1:
return gs.to_ndarray(points[0], to_ndim=2)
if weights is None:
weights = gs.ones((n_points, 1))
weights = gs.array(weights)
weights = gs.to_ndarray(weights, to_ndim=2, axis=1)
sum_weights = gs.sum(weights)
n_init = len(init_points)
if n_init == 0:
current_mean = points[0]
else:
current_mean = init_points[0]
tau = 1.0
iteration = 0
logs = self.log(point=points, base_point=current_mean)
current_tangent_mean = gs.einsum('nk,nj->j', weights, logs)
current_tangent_mean /= sum_weights
norm_current_tangent_mean = gs.linalg.norm(current_tangent_mean)
while (norm_current_tangent_mean > epsilon
and iteration < n_max_iterations):
iteration = iteration + 1
shooting_vector = gs.to_ndarray(
tau * current_tangent_mean,
to_ndim=2)
next_mean = self.exp(
tangent_vec=shooting_vector,
base_point=current_mean)
logs = self.log(point=points, base_point=next_mean)
next_tangent_mean = gs.einsum('nk,nj->j', weights, logs)
next_tangent_mean /= sum_weights
norm_next_tangent_mean = gs.linalg.norm(next_tangent_mean)
if verbose:
print(
"Iter {0}: tau= {1}, "
"norm_current_tangent_mean = {2}".format(
                        iteration, tau, norm_current_tangent_mean))
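            # Adaptive step size: accept the step and enlarge tau when the
            # tangent-mean norm decreased; otherwise shrink tau and retry
            # from the current mean.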
if norm_next_tangent_mean < norm_current_tangent_mean:
current_mean = next_mean
current_tangent_mean = next_tangent_mean
norm_current_tangent_mean = norm_next_tangent_mean
tau = max(1.0, 1.0511111 * tau)
else:
tau = tau * 0.8
if iteration == n_max_iterations:
            warnings.warn(
                'Maximum number of iterations {} reached. '
                'The mean may be inaccurate'.format(n_max_iterations))
return gs.to_ndarray(current_mean, to_ndim=2)
def diameter(self, points):
"""Give the distance between two farthest points.
Distance between the two points that are farthest away from each other
in points.
Parameters
----------
        points : array-like, shape=[n_samples, dimension]

        Returns
        -------
        diameter : float
            Distance between the two farthest points in `points`.
"""
diameter = 0.0
n_points = points.shape[0]
for i in range(n_points - 1):
dist_to_neighbors = self.dist(points[i, :], points[i + 1:, :])
dist_to_farthest_neighbor = gs.amax(dist_to_neighbors)
diameter = gs.maximum(diameter, dist_to_farthest_neighbor)
return diameter
def closest_neighbor_index(self, point, neighbors):
"""Closest neighbor of point among neighbors.
Parameters
----------
        point : array-like, shape=[n_samples, dimension]
        neighbors : array-like, shape=[n_neighbors, dimension]

        Returns
        -------
        closest_neighbor_index : int
            Index of the closest neighbor among `neighbors`.
"""
dist = self.dist(point, neighbors)
closest_neighbor_index = gs.argmin(dist)
return closest_neighbor_index
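

# ---------------------------------------------------------------------------
# Minimal sketch (illustrative, not part of geomstats): a flat toy metric that
# exercises the generic machinery above. The subclass, its exp/log overrides
# and the sample points below are assumptions made for demonstration only.
class _EuclideanToyMetric(RiemannianMetric):
    """Canonical flat metric on R^dim, for demonstration only."""

    def __init__(self, dimension):
        super().__init__(dimension=dimension)
        self.dim = dimension  # avoid assuming the parent's attribute name

    def inner_product_matrix(self, base_point=None):
        # Identity matrix at every point: the standard inner product.
        return gs.to_ndarray(gs.eye(self.dim), to_ndim=3)

    def exp(self, tangent_vec, base_point):
        # In flat space the exponential map is a straight-line step.
        return base_point + tangent_vec

    def log(self, point, base_point):
        return point - base_point


if __name__ == '__main__':
    _toy_metric = _EuclideanToyMetric(dimension=2)
    _toy_points = gs.array([[0., 0.], [2., 0.], [0., 2.]])
    # Geodesic distance reduces to the Euclidean one: here it is 2.
    print(_toy_metric.dist(_toy_points[0:1], _toy_points[1:2]))
    # The Frechet mean of a flat metric is the (weighted) centroid.
    print(_toy_metric.mean(_toy_points))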
hettlage/salt-data-quality-site | app/main/pages/instrument/hrs/red/order/plots.py | da9ff4a51e8affa47e0bc1c0383c7fdeaac2155e | import pandas as pd
from bokeh.models import HoverTool
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.palettes import Plasma256
from bokeh.plotting import figure, ColumnDataSource
from app import db
from app.decorators import data_quality
# creates your plot
date_formatter = DatetimeTickFormatter(microseconds=['%f'],
milliseconds=['%S.%2Ns'],
seconds=[':%Ss'],
minsec=[':%Mm:%Ss'],
minutes=['%H:%M:%S'],
hourmin=['%H:%M:'],
hours=["%H:%M"],
days=["%d %b"],
months=["%d %b %Y"],
years=["%b %Y"])
def get_position_source(start_date, end_date, obsmode):
logic = " and HrsMode_Id = {obsmode} " \
" and FileName like 'RORDER%%' " \
.format(obsmode=obsmode)
sql = "select Date, y_upper, HrsOrder, CONVERT(Date,char) AS Time " \
" from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
" where Date > '{start_date}' and Date <'{end_date}' {logic}" \
.format(start_date=start_date, end_date=end_date, logic=logic)
df = pd.read_sql(sql, db.engine)
colors = []
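    # Map each HrsOrder value linearly onto the Plasma palette, so the lowest
    # order gets the darkest colour and the highest the brightest.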
if len(df) > 0:
ord_min = df['HrsOrder'].min()
ord_max = df['HrsOrder'].max()
colors = [Plasma256[int((y - ord_min) * (len(Plasma256) - 1) / float(ord_max - ord_min))] for y in
df["HrsOrder"]]
df['colors'] = colors
source = ColumnDataSource(df)
return source
@data_quality(name='hrs_order', caption='HRS Order')
def hrs_order_plot(start_date, end_date):
"""Return a <div> element with the Order plot.
The plot shows the HRS order for obsmode High, low and medium over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order plot.
"""
def get_source(obsmode):
logic = " and HrsMode_Id = {obsmode} " \
" and FileName like 'RORDER%%' " \
" group by Date " \
.format(obsmode=obsmode)
sql = "select Date, (Max(HrsOrder) - Min(HrsOrder)) as ord, CONVERT(Date, char) AS Time " \
" from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
" where Date > '{start_date}' and Date <'{end_date}' {logic}" \
.format(start_date=start_date, end_date=end_date, logic=logic)
df = pd.read_sql(sql, db.engine)
source = ColumnDataSource(df)
return source
low_source = get_source(1) # HrsMode_Id = 1 low
med_source = get_source(2) # HrsMode_Id = 2 med
high_source = get_source(3) # HrsMode_Id = 3 high
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HrsOrder(Max - Min): </span>
<span style="font-size: 15px;"> @ord</span>
</div>
</div>
"""
)
p = figure(title="HRS Order",
x_axis_label='Date',
y_axis_label='Max(HrsOrder) - Min(HrsOrder)',
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=low_source, x='Date', y='ord', color='red', fill_alpha=0.2, legend='Low', size=10)
p.scatter(source=med_source, x='Date', y='ord', color='orange', fill_alpha=0.2, legend='Medium', size=10)
p.scatter(source=high_source, x='Date', y='ord', color='green', fill_alpha=0.2, legend='High', size=10)
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
p.xaxis[0].formatter = date_formatter
return p
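

# Sketch (illustrative, not used by the app): assuming the data_quality
# decorator returns the Bokeh figure unchanged, a caller could embed the plot
# in an HTML page with the standard Bokeh embedding helpers.
def _example_embed_order_plot(start_date, end_date):
    from bokeh.embed import components

    script, div = components(hrs_order_plot(start_date, end_date))
    return script, div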
@data_quality(name='hrs_order_position_high', caption=' ')
def hrs_order_position_high_plot(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
The plot shows the HRS order for obsmode High resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
high_source = get_position_source(start_date, end_date, 3) # HrsMode_Id = 3 high
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position High Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=high_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='hrs_order_position_medium', caption=' ')
def hrs_order_position_medium_plot(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
    The plot shows the HRS order for obsmode Medium resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
    med_source = get_position_source(start_date, end_date, 2)  # HrsMode_Id = 2 medium
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position Medium Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
    p.scatter(source=med_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='hrs_order_position_low', caption=' ')
def hrs_order_position_low_plot(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
    The plot shows the HRS order for obsmode Low resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
    low_source = get_position_source(start_date, end_date, 1)  # HrsMode_Id = 1 low
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position Low Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
    p.scatter(source=low_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
    return p
yannickbf-prog/python | p6e8.py | da4bd2c8668966359b829a8ac2a896afeca2b150 | # Yannick p6e8: Write a program that first asks for a number and then keeps asking for numbers until the sum of the numbers entered reaches the initial number. The program ends by printing the list of numbers.
limite = int(input("Enter the limit:"))

valores = int(input("Enter a value:"))

listavalores = []

listavalores.append(valores)

while limite > sum(listavalores):
    valores = int(input("Enter another value:"))
    listavalores.append(valores)

print(f"The limit to reach is {limite}. The list created is ", end="")

for i in range(len(listavalores)):
    print(listavalores[i], end=" ")

print(f"since the sum of these numbers is {sum(listavalores)}")
RivtLib/replit01 | .venv/lib/python3.8/site-packages/cleo/application.py | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | from typing import Optional
from typing import Tuple
from clikit.console_application import ConsoleApplication
from .commands import BaseCommand
from .commands.completions_command import CompletionsCommand
from .config import ApplicationConfig
class Application(ConsoleApplication, object):
"""
An Application is the container for a collection of commands.
This class is optimized for a standard CLI environment.
Usage:
>>> app = Application('myapp', '1.0 (stable)')
>>> app.add(HelpCommand())
>>> app.run()
"""
def __init__(
self, name=None, version=None, complete=True, config=None
): # type: (str, str, bool, Optional[ApplicationConfig]) -> None
if config is None:
config = ApplicationConfig(name, version)
super(Application, self).__init__(config)
if complete:
self.add(CompletionsCommand())
def add_commands(self, *commands): # type: (Tuple[BaseCommand]) -> None
for command in commands:
self.add(command)
def add(self, command): # type: (BaseCommand) -> Application
"""
Adds a command object.
"""
self.add_command(command.config)
command.set_application(self)
return self
def find(self, name): # type: (str) -> BaseCommand
names = name.split(" ")
command = self.get_command(names[0])
for name in names[1:]:
command = command.get_sub_command(name)
return command.config.handler
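

# Sketch (hypothetical command names): because `find` splits the name on
# spaces, an application that registered a `remote` command exposing an `add`
# sub-command could resolve the nested handler like this:
#
#     app = Application("myapp", "1.0")
#     app.add_commands(RemoteCommand())  # assumed to define sub-command "add"
#     handler = app.find("remote add")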